Example #1
class PoolResilverService(ConfigService):
    class Config:
        namespace = 'pool.resilver'
        datastore = 'storage.resilver'
        datastore_extend = 'pool.resilver.resilver_extend'

    async def resilver_extend(self, data):
        data['begin'] = data['begin'].strftime('%H:%M')
        data['end'] = data['end'].strftime('%H:%M')
        data['weekday'] = [int(v) for v in data['weekday'].split(',')]
        return data

    async def validate_fields_and_update(self, data, schema):
        verrors = ValidationErrors()

        begin = data.get('begin')
        if begin:
            data['begin'] = time(int(begin.split(':')[0]),
                                 int(begin.split(':')[1]))

        end = data.get('end')
        if end:
            data['end'] = time(int(end.split(':')[0]), int(end.split(':')[1]))

        weekdays = data.get('weekday')
        if weekdays:
            if len([day for day in weekdays if day not in range(1, 8)]) > 0:
                verrors.add(
                    f'{schema}.weekday',
                    'The week days should be in range of 1-7 inclusive')
            else:
                data['weekday'] = ','.join([str(day) for day in weekdays])

        return verrors, data

    @accepts(
        Dict('pool_resilver', Str('begin', validators=[Time()]),
             Str('end', validators=[Time()]), Bool('enabled'),
             List('weekday', items=[Int('weekday')])))
    async def do_update(self, data):
        config = await self.config()
        original_config = config.copy()
        config.update(data)

        verrors, new_config = await self.validate_fields_and_update(
            config, 'pool_resilver_update')
        if verrors:
            raise verrors

        # Before checking whether anything has changed, convert original_config
        # to the same representation as new_config
        original_config['weekday'] = ','.join(
            [str(day) for day in original_config['weekday']])
        original_config['begin'] = time(
            *(int(value) for value in original_config['begin'].split(':')))
        original_config['end'] = time(
            *(int(value) for value in original_config['end'].split(':')))
        if len(set(original_config.items()) ^ set(new_config.items())) > 0:
            # data has changed
            await self.middleware.call('datastore.update',
                                       self._config.datastore,
                                       new_config['id'], new_config)

            await self.middleware.call('service.restart', 'cron',
                                       {'onetime': False})
            await self.middleware.call('pool.configure_resilver_priority')

        return await self.config()
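
For reference, a minimal usage sketch (not part of the example above): driving this service through the middleware client. It assumes a local middlewared instance and the `middlewared.client.Client` API; field names follow the `pool_resilver` schema accepted by `do_update`.

# Hypothetical usage sketch, assuming the middlewared client API.
from middlewared.client import Client

with Client() as c:
    c.call('pool.resilver.update', {
        'begin': '18:00',
        'end': '09:00',
        'enabled': True,
        'weekday': [1, 2, 3, 4, 5],  # days 1-7, as validated above
    })
    print(c.call('pool.resilver.config'))
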
Example #2
class NetworkGeneralService(Service):

    class Config:
        namespace = 'network.general'
        cli_namespace = 'network.general'

    @accepts()
    @returns(
        Dict(
            'network_summary',
            Dict('ips', additional_attrs=True, required=True),
            List('default_routes', items=[IPAddr('default_route')], required=True),
            List('nameservers', items=[IPAddr('nameserver')], required=True),
        )
    )
    async def summary(self):
        """
        Retrieve general information for current Network.

        Returns a dictionary. For example:

        .. examples(websocket)::

            :::javascript
            {
                "ips": {
                    "vtnet0": {
                        "IPV4": [
                            "192.168.0.15/24"
                        ]
                    }
                },
                "default_routes": [
                    "192.168.0.1"
                ],
                "nameservers": [
                    "192.168.0.1"
                ]
            }
        """
        ips = defaultdict(lambda: defaultdict(list))
        for iface in await self.middleware.call('interface.query'):
            for alias in iface['state']['aliases']:
                if alias['type'] == 'INET':
                    key = 'IPV4'
                elif alias['type'] == 'INET6':
                    key = 'IPV6'
                else:
                    continue
                ips[iface['name']][key].append(f'{alias["address"]}/{alias["netmask"]}')

        default_routes = []
        for route in await self.middleware.call('route.system_routes', [('netmask', 'in', ['0.0.0.0', '::'])]):
            # IPv6 has local addresses that don't have gateways. Make sure we only return a gateway
            # if there is one.
            if route['gateway']:
                default_routes.append(route['gateway'])

        nameservers = []
        for ns in await self.middleware.call('dns.query'):
            nameservers.append(ns['nameserver'])

        return {
            'ips': ips,
            'default_routes': default_routes,
            'nameservers': nameservers,
        }
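
To see the documented shape in practice, the summary can be fetched through the middleware client; this is an illustrative sketch that assumes a local middlewared instance and the `middlewared.client.Client` API.

# Hypothetical usage sketch: printing the summary returned by network.general.summary.
from middlewared.client import Client

with Client() as c:
    summary = c.call('network.general.summary')
    for iface, families in summary['ips'].items():
        print(iface, families.get('IPV4', []), families.get('IPV6', []))
    print('default routes:', summary['default_routes'])
    print('nameservers:', summary['nameservers'])
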
Example #3
class CoreService(Service):
    @filterable
    def get_jobs(self, filters=None, options=None):
        """Get the long running jobs."""
        jobs = filter_list([
            i.__encode__() for i in list(self.middleware.jobs.all().values())
        ], filters, options)
        return jobs

    @accepts(Int('id'))
    @job()
    def job_wait(self, job, id):
        target_job = self.middleware.jobs.get(id)
        target_job.wait_sync()
        if target_job.error:
            raise CallError(target_job.error)
        else:
            return target_job.result

    @accepts(Int('id'),
             Dict(
                 'job-update',
                 Dict('progress', additional_attrs=True),
             ))
    def job_update(self, id, data):
        job = self.middleware.jobs.all()[id]
        progress = data.get('progress')
        if progress:
            job.set_progress(
                progress['percent'],
                description=progress.get('description'),
                extra=progress.get('extra'),
            )

    @private
    def notify_postinit(self):
        self.middleware.call_sync('migration.run')

        # Sentinel file to tell we have gone far enough in the boot process.
        # See #17508
        open('/tmp/.bootready', 'w').close()

        # Send event to middlewared saying we are late enough in the process to call it ready
        self.middleware.call_sync('core.event_send', 'system', 'ADDED',
                                  {'id': 'ready'})

    @accepts(Int('id'))
    def job_abort(self, id):
        job = self.middleware.jobs.all()[id]
        return job.abort()

    @accepts()
    def get_services(self):
        """Returns a list of all registered services."""
        services = {}
        for k, v in list(self.middleware.get_services().items()):
            if v._config.private is True:
                continue
            if isinstance(v, CRUDService):
                _typ = 'crud'
            elif isinstance(v, ConfigService):
                _typ = 'config'
            else:
                _typ = 'service'
            services[k] = {
                'config': {
                    k: v
                    for k, v in list(v._config.__dict__.items())
                    if not k.startswith(('_', 'process_pool', 'thread_pool'))
                },
                'type': _typ,
            }
        return services

    @accepts(Str('service', default=None, null=True))
    def get_methods(self, service=None):
        """Return methods metadata of every available service.

        `service` parameter is optional and filters the result for a single service."""
        data = {}
        for name, svc in list(self.middleware.get_services().items()):
            if service is not None and name != service:
                continue

            # Skip private services
            if svc._config.private:
                continue

            for attr in dir(svc):

                if attr.startswith('_'):
                    continue

                method = None
                # For CRUD.do_{update,delete} they need to be accounted
                # as "item_method", since they are just wrapped.
                item_method = None
                if isinstance(svc, CRUDService):
                    """
                    For CRUD the create/update/delete are special.
                    The real implementation happens in do_create/do_update/do_delete
                    so that's where we actually extract pertinent information.
                    """
                    if attr in ('create', 'update', 'delete'):
                        method = getattr(svc, 'do_{}'.format(attr), None)
                        if method is None:
                            continue
                        if attr in ('update', 'delete'):
                            item_method = True
                    elif attr in ('do_create', 'do_update', 'do_delete'):
                        continue
                elif isinstance(svc, ConfigService):
                    """
                    For Config the update is special.
                    The real implementation happens in do_update
                    so that's where we actually extract pertinent information.
                    """
                    if attr == 'update':
                        original_name = 'do_{}'.format(attr)
                        if hasattr(svc, original_name):
                            method = getattr(svc, original_name, None)
                        else:
                            method = getattr(svc, attr)
                        if method is None:
                            continue
                    elif attr == 'do_update':
                        continue

                if method is None:
                    method = getattr(svc, attr, None)

                if method is None or not callable(method):
                    continue

                # Skip private methods
                if hasattr(method, '_private'):
                    continue

                # terminate is a private method used to clean up a service on shutdown
                if attr == 'terminate':
                    continue

                examples = defaultdict(list)
                doc = inspect.getdoc(method)
                if doc:
                    """
                    Allow method docstring to have sections in the format of:

                      .. section_name::

                    Currently the following sections are available:

                      .. examples:: - goes into `__all__` list in examples
                      .. examples(rest):: - goes into `rest` list in examples
                      .. examples(websocket):: - goes into `websocket` list in examples
                    """
                    sections = re.split(r'^.. (.+?)::$', doc, flags=re.M)
                    doc = sections[0]
                    for i in range(int((len(sections) - 1) / 2)):
                        idx = (i + 1) * 2 - 1
                        reg = re.search(r'examples(?:\((.+)\))?',
                                        sections[idx])
                        if reg is None:
                            continue
                        exname = reg.groups()[0]
                        if exname is None:
                            exname = '__all__'
                        examples[exname].append(sections[idx + 1])

                accepts = getattr(method, 'accepts', None)
                if accepts:
                    accepts = [
                        i.to_json_schema() for i in accepts
                        if not getattr(i, 'hidden', False)
                    ]

                data['{0}.{1}'.format(name, attr)] = {
                    'description': doc,
                    'examples': examples,
                    'accepts': accepts,
                    'item_method': True if item_method else hasattr(method, '_item_method'),
                    'no_auth_required': hasattr(method, '_no_auth_required'),
                    'filterable': hasattr(method, '_filterable'),
                    'require_websocket': hasattr(method, '_pass_app'),
                    'job': hasattr(method, '_job'),
                    'downloadable': hasattr(method, '_job') and method._job['pipes'] == ['output'],
                }
        return data

    @accepts()
    def get_events(self):
        """
        Returns metadata for every possible event emitted from websocket server.
        """
        events = {}
        for name, attrs in self.middleware.get_events():
            events[name] = {
                'description': attrs['description'],
            }
        return events

    @private
    async def call_hook(self, name, args, kwargs=None):
        kwargs = kwargs or {}
        await self.middleware.call_hook(name, *args, **kwargs)

    @private
    async def event_send(self, name, event_type, kwargs):
        self.middleware.send_event(name, event_type, **kwargs)

    @accepts()
    def ping(self):
        """
        Utility method which just returns "pong".
        Can be used to keep connection/authtoken alive instead of using
        "ping" protocol message.
        """
        return 'pong'

    @accepts(
        Str('method'),
        List('args', default=[]),
        Str('filename'),
    )
    async def download(self, method, args, filename):
        """
        Core helper to call a job marked for download.

        Returns the job id and the URL for download.
        """
        job = await self.middleware.call(
            method, *args, pipes=Pipes(output=self.middleware.pipe()))
        token = await self.middleware.call('auth.generate_token', 300, {
            'filename': filename,
            'job': job.id
        })
        self.middleware.fileapp.register_job(job.id)
        return job.id, f'/_download/{job.id}?auth_token={token}'

    @private
    def reconfigure_logging(self):
        """
        When /var/log gets moved because of system dataset
        we need to make sure the log file is reopened because
        of the new location
        """
        handler = logging._handlers.get('file')
        if handler:
            stream = handler.stream
            handler.stream = handler._open()
            if sys.stdout is stream:
                sys.stdout = handler.stream
                sys.stderr = handler.stream
            try:
                stream.close()
            except Exception:
                pass

    @private
    @accepts(Dict(
        'core-job',
        Int('sleep'),
    ))
    @job()
    def job_test(self, job, data=None):
        """
        Private method used to test a job. If `sleep` is given, it reports progress for that many seconds before returning `true`.
        """
        if data is None:
            data = {}

        sleep = data.get('sleep')
        if sleep is not None:

            def sleep_fn():
                i = 0
                while i < sleep:
                    job.set_progress((i / sleep) * 100)
                    time.sleep(1)
                    i += 1
                job.set_progress(100)

            t = threading.Thread(target=sleep_fn, daemon=True)
            t.start()
            t.join()
        return True

    @accepts(
        Str('engine', enum=['PTVS', 'PYDEV']),
        Dict(
            'options',
            Str('secret'),
            Str('bind_address', default='0.0.0.0'),
            Int('bind_port', default=3000),
            Str('host'),
            Bool('wait_attach', default=False),
            Str('local_path'),
        ),
    )
    async def debug(self, engine, options):
        """
        Setup middlewared for remote debugging.

        engines:
          - PTVS: Python Visual Studio
          - PYDEV: Python Dev (Eclipse/PyCharm)

        options:
          - secret: password for PTVS
          - host: required for PYDEV, hostname of local computer (developer workstation)
          - local_path: required for PYDEV, path for middlewared source in local computer (e.g. /home/user/freenas/src/middlewared/middlewared)
        """
        if engine == 'PTVS':
            import ptvsd
            if 'secret' not in options:
                raise ValidationError('secret', 'secret is required for PTVS')
            ptvsd.enable_attach(
                options['secret'],
                address=(options['bind_address'], options['bind_port']),
            )
            if options['wait_attach']:
                ptvsd.wait_for_attach()
        elif engine == 'PYDEV':
            for i in ('host', 'local_path'):
                if i not in options:
                    raise ValidationError(i, f'{i} is required for PYDEV')
            os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = json.dumps([
                [
                    options['local_path'],
                    '/usr/local/lib/python3.7/site-packages/middlewared'
                ],
            ])
            import pydevd
            pydevd.stoptrace()
            pydevd.settrace(host=options['host'])

    @private
    def threads_stacks(self):
        return get_threads_stacks()

    @accepts(Str("method"), List("params", default=[]))
    @job(lock=lambda args: f"bulk:{args[0]}")
    async def bulk(self, job, method, params):
        """
        Loop over a list of parameter lists for the given method, returning a list of
        dicts containing `result` and `error` keys.

        `result` is the value returned by the method call (null if it failed);
        `error` is null on success and the exception message on failure.
        """
        statuses = []
        progress_step = 100 / len(params)
        current_progress = 0

        for p in params:
            try:
                msg = await self.middleware.call(method, *p)
                error = None

                if isinstance(msg, Job):
                    # Keep a separate reference so we don't shadow our own bulk job.
                    result_job = msg
                    msg = await result_job.wait()

                    if result_job.error:
                        error = result_job.error

                statuses.append({"result": msg, "error": error})
            except Exception as e:
                statuses.append({"result": None, "error": str(e)})

            current_progress += progress_step
            job.set_progress(current_progress)

        return statuses
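
Some of the public `core.*` methods above can be exercised directly from a client. The sketch below is illustrative only; it assumes a local middlewared instance, the `middlewared.client.Client` API, and the job fields produced by `Job.__encode__` (`id`, `method`, `state`).

# Hypothetical usage sketch for a few of the core.* calls defined above.
from middlewared.client import Client

with Client() as c:
    assert c.call('core.ping') == 'pong'

    # Registered (non-private) services and per-method metadata.
    services = c.call('core.get_services')
    methods = c.call('core.get_methods', 'core')
    print(len(services), 'services;', methods['core.ping']['description'])

    # Long running jobs known to the middleware, newest first.
    for j in c.call('core.get_jobs', [], {'order_by': ['-id'], 'limit': 5}):
        print(j['id'], j['method'], j['state'])
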
Example #4
class ZFSDatasetService(CRUDService):

    class Config:
        namespace = 'zfs.dataset'
        private = True
        process_pool = True

    def flatten_datasets(self, datasets):
        return sum([[deepcopy(ds)] + self.flatten_datasets(ds['children']) for ds in datasets], [])

    @filterable
    def query(self, filters=None, options=None):
        """
        In `query-options` we can provide `extra` arguments which control which data should be retrieved
        for a dataset.

        `query-options.extra.top_level_properties` is a list of properties to include in the top level dict of each
        dataset. By default only the mountpoint key is added, keeping legacy behavior. If no top level properties are
        desired, pass an empty list; if null is specified, the mountpoint key is added to the top level dict whenever
        it is present in `query-options.extra.properties` or that list is null as well.

        `query-options.extra.properties` is a list of properties which should be retrieved. If null (the default),
        all properties are retrieved; if empty, no properties are retrieved (`mountpoint` is special in this
        case and is controlled by the `query-options.extra.mountpoint` attribute).

        zfs.dataset.query can return dataset data in two shapes. The first is a flat structure (the default), where
        every dataset in the system is returned as a separate object that also contains all the data there is for
        its children. This retrieval type is slightly slower because of the duplicates present in each object.
        The second is hierarchical, where only top level datasets are returned in the list and each contains all of
        its children under the `children` key. This retrieval type is slightly faster.
        This behavior is controlled by the `query-options.extra.flat` attribute, which defaults to true.

        `query-options.extra.user_properties` controls whether user defined properties of datasets are retrieved.

        As a shorthand for excluding all properties from retrieval, the single attribute
        `query-options.extra.retrieve_properties`, when set to false, ensures that no property is retrieved at all
        and overrides any other property retrieval attribute.
        """
        options = options or {}
        extra = options.get('extra', {}).copy()
        top_level_props = None if extra.get('top_level_properties') is None else extra['top_level_properties'].copy()
        props = extra.get('properties', None)
        flat = extra.get('flat', True)
        user_properties = extra.get('user_properties', True)
        retrieve_properties = extra.get('retrieve_properties', True)
        if not retrieve_properties:
            # This is a shorthand where the consumer can specify that they don't want any properties
            # to be retrieved
            user_properties = False
            props = []

        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoid getting all datasets
            if filters and len(filters) == 1 and list(filters[0][:2]) == ['id', '=']:
                state_options = {
                    'snapshots': extra.get('snapshots', False),
                    'recursive': extra.get('recursive', True),
                    'snapshots_recursive': extra.get('snapshots_recursive', False)
                }
                try:
                    datasets = [zfs.get_dataset(filters[0][2]).__getstate__(**state_options)]
                except libzfs.ZFSException:
                    datasets = []
            else:
                datasets = zfs.datasets_serialized(
                    props=props, top_level_props=top_level_props, user_props=user_properties
                )
                if flat:
                    datasets = self.flatten_datasets(datasets)
                else:
                    datasets = list(datasets)

        return filter_list(datasets, filters, options)

    def query_for_quota_alert(self):
        return [
            {
                k: v for k, v in dataset['properties'].items()
                if k in [
                    "name", "quota", "available", "refquota", "usedbydataset", "mounted", "mountpoint",
                    "org.freenas:quota_warning", "org.freenas:quota_critical",
                    "org.freenas:refquota_warning", "org.freenas:refquota_critical"
                ]
            }
            for dataset in self.query()
        ]

    def common_load_dataset_checks(self, ds):
        self.common_encryption_checks(ds)
        if ds.key_loaded:
            raise CallError(f'{ds.name} key is already loaded')

    def common_encryption_checks(self, ds):
        if not ds.encrypted:
            raise CallError(f'{ds.name} is not encrypted')

    def get_quota(self, ds, quota_type):
        if quota_type == 'dataset':
            dataset = self.query([('id', '=', ds)], {'get': True})
            return [{
                'quota_type': 'DATASET',
                'id': ds,
                'name': ds,
                'quota': int(dataset['properties']['quota']['rawvalue']),
                'refquota': int(dataset['properties']['refquota']['rawvalue']),
                'used_bytes': int(dataset['properties']['used']['rawvalue']),
            }]

        quota_list = []
        quota_get = subprocess.run(
            ['zfs', f'{quota_type}space', '-H', '-n', '-p', '-o', 'name,used,quota,objquota,objused', ds],
            capture_output=True,
            check=False,
        )
        if quota_get.returncode != 0:
            raise CallError(
                f'Failed to get {quota_type} quota for {ds}: [{quota_get.stderr.decode()}]'
            )

        for quota in quota_get.stdout.decode().splitlines():
            m = quota.split('\t')
            if len(m) != 5:
                self.logger.debug('Invalid %s quota: %s',
                                  quota_type.lower(), quota)
                continue

            entry = {
                'quota_type': quota_type.upper(),
                'id': int(m[0]),
                'name': None,
                'quota': int(m[2]),
                'used_bytes': int(m[1]),
                'used_percent': 0,
                'obj_quota': int(m[3]) if m[3] != '-' else 0,
                'obj_used': int(m[4]) if m[4] != '-' else 0,
                'obj_used_percent': 0,
            }
            if entry['quota'] > 0:
                entry['used_percent'] = entry['used_bytes'] / entry['quota'] * 100

            if entry['obj_quota'] > 0:
                entry['obj_used_percent'] = entry['obj_used'] / entry['obj_quota'] * 100

            try:
                if quota_type == 'USER':
                    entry['name'] = (
                        self.middleware.call_sync('user.get_user_obj',
                                                  {'uid': entry['id']})
                    )['pw_name']
                else:
                    entry['name'] = (
                        self.middleware.call_sync('group.get_group_obj',
                                                  {'gid': entry['id']})
                    )['gr_name']

            except Exception:
                self.logger.debug('Unable to resolve %s id %d to name',
                                  quota_type.lower(), entry['id'])
                pass

            quota_list.append(entry)

        return quota_list

    def set_quota(self, ds, quota_list):
        cmd = ['zfs', 'set']
        cmd.extend(quota_list)
        cmd.append(ds)
        quota_set = subprocess.run(cmd, capture_output=True, check=False)
        if quota_set.returncode != 0:
            raise CallError(f'Failed to set userspace quota on {ds}: [{quota_set.stderr.decode()}]')

    @accepts(
        Str('id'),
        Dict(
            'load_key_options',
            Bool('mount', default=True),
            Bool('recursive', default=False),
            Any('key', default=None, null=True),
            Str('key_location', default=None, null=True),
        ),
    )
    def load_key(self, id, options):
        mount_ds = options.pop('mount')
        recursive = options.pop('recursive')
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_load_dataset_checks(ds)
                ds.load_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to load key for {id}', exc_info=True)
            raise CallError(f'Failed to load key for {id}: {e}')
        else:
            if mount_ds:
                self.mount(id, {'recursive': recursive})

    @accepts(Str('name'), List('params', default=[], private=True))
    @job()
    def bulk_process(self, job, name, params):
        f = getattr(self, name, None)
        if not f:
            raise CallError(f'{name} method not found in zfs.dataset')

        statuses = []
        for i in params:
            result = error = None
            try:
                result = f(*i)
            except Exception as e:
                error = str(e)
            finally:
                statuses.append({'result': result, 'error': error})

        return statuses

    @accepts(
        Str('id'),
        Dict(
            'check_key',
            Any('key', default=None, null=True),
            Str('key_location', default=None, null=True),
        )
    )
    def check_key(self, id, options):
        """
        Returns `true` if the `key` is valid, `false` otherwise.
        """
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                return ds.check_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to check key for {id}', exc_info=True)
            raise CallError(f'Failed to check key for {id}: {e}')

    @accepts(
        Str('id'),
        Dict(
            'unload_key_options',
            Bool('recursive', default=False),
            Bool('force_umount', default=False),
            Bool('umount', default=False),
        )
    )
    def unload_key(self, id, options):
        force = options.pop('force_umount')
        if options.pop('umount') and self.middleware.call_sync('zfs.dataset.get_instance', id)['mountpoint']:
            self.umount(id, {'force': force})
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                if not ds.key_loaded:
                    raise CallError(f'{id}\'s key is not loaded')
                ds.unload_key(**options)
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to unload key for {id}', exc_info=True)
            raise CallError(f'Failed to unload key for {id}: {e}')

    @accepts(
        Str('id'),
        Dict(
            'change_key_options',
            Dict(
                'encryption_properties',
                Str('keyformat'),
                Str('keylocation'),
                Int('pbkdf2iters')
            ),
            Bool('load_key', default=True),
            Any('key', default=None, null=True),
        ),
    )
    def change_key(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                self.common_encryption_checks(ds)
                ds.change_key(props=options['encryption_properties'], load_key=options['load_key'], key=options['key'])
        except libzfs.ZFSException as e:
            self.logger.error(f'Failed to change key for {id}', exc_info=True)
            raise CallError(f'Failed to change key for {id}: {e}')

    @accepts(
        Str('id'),
        Dict(
            'change_encryption_root_options',
            Bool('load_key', default=True),
        )
    )
    def change_encryption_root(self, id, options):
        try:
            with libzfs.ZFS() as zfs:
                ds = zfs.get_dataset(id)
                ds.change_key(load_key=options['load_key'], inherit=True)
        except libzfs.ZFSException as e:
            raise CallError(f'Failed to change encryption root for {id}: {e}')

    @accepts(Dict(
        'dataset_create',
        Str('name', required=True),
        Str('type', enum=['FILESYSTEM', 'VOLUME'], default='FILESYSTEM'),
        Dict(
            'properties',
            Bool('sparse'),
            additional_attrs=True,
        ),
    ))
    def do_create(self, data):
        """
        Creates a ZFS dataset.
        """

        verrors = ValidationErrors()

        if '/' not in data['name']:
            verrors.add('name', 'You need a full name, e.g. pool/newdataset')

        if verrors:
            raise verrors

        properties = data.get('properties') or {}
        sparse = properties.pop('sparse', False)
        params = {}

        for k, v in data['properties'].items():
            params[k] = v

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(data['name'].split('/')[0])
                pool.create(data['name'], params, fstype=getattr(libzfs.DatasetType, data['type']), sparse_vol=sparse)
        except libzfs.ZFSException as e:
            self.logger.error('Failed to create dataset', exc_info=True)
            raise CallError(f'Failed to create dataset: {e}')

    @accepts(
        Str('id'),
        Dict(
            'dataset_update',
            Dict(
                'properties',
                additional_attrs=True,
            ),
        ),
    )
    def do_update(self, id, data):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(id)

                if 'properties' in data:
                    properties = data['properties'].copy()
                    # Move quota and refquota to the end of the dict so they are
                    # applied after any reservation properties.
                    for k in ['quota', 'refquota']:
                        if k in properties:
                            properties[k] = properties.pop(k)
                    for k, v in properties.items():

                        # If prop already exists we just update it,
                        # otherwise create a user property
                        prop = dataset.properties.get(k)
                        try:
                            if prop:
                                if v.get('source') == 'INHERIT':
                                    prop.inherit(recursive=v.get('recursive', False))
                                elif 'value' in v and (
                                    prop.value != v['value'] or prop.source.name == 'INHERITED'
                                ):
                                    prop.value = v['value']
                                elif 'parsed' in v and (
                                    prop.parsed != v['parsed'] or prop.source.name == 'INHERITED'
                                ):
                                    prop.parsed = v['parsed']
                            else:
                                if v.get('source') == 'INHERIT':
                                    pass
                                else:
                                    if 'value' not in v:
                                        raise ValidationError(
                                            'properties', f'properties.{k} needs a "value" attribute'
                                        )
                                    if ':' not in k:
                                        raise ValidationError(
                                            'properties', f'User property needs a colon (:) in its name'
                                        )
                                    prop = libzfs.ZFSUserProperty(v['value'])
                                    dataset.properties[k] = prop
                        except libzfs.ZFSException as e:
                            raise ZFSSetPropertyError(k, str(e))

        except libzfs.ZFSException as e:
            self.logger.error('Failed to update dataset', exc_info=True)
            raise CallError(f'Failed to update dataset: {e}')

    def do_delete(self, id, options=None):
        options = options or {}
        force = options.get('force', False)
        recursive = options.get('recursive', False)

        args = []
        if force:
            args += ['-f']
        if recursive:
            args += ['-r']

        # If dataset is mounted and has receive_resume_token, we should destroy it or ZFS will say
        # "cannot destroy 'pool/dataset': dataset already exists"
        recv_run = subprocess.run(['zfs', 'recv', '-A', id], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Destroying may take a long time, lets not use py-libzfs as it will block
        # other ZFS operations.
        try:
            subprocess.run(
                ['zfs', 'destroy'] + args + [id], text=True, capture_output=True, check=True,
            )
        except subprocess.CalledProcessError as e:
            if recv_run.returncode == 0 and e.stderr.strip().endswith('dataset does not exist'):
                # This operation might have deleted this dataset if it was created by `zfs recv` operation
                return
            self.logger.error('Failed to delete dataset', exc_info=True)
            error = e.stderr.strip()
            errno_ = errno.EFAULT
            if "Device busy" in error:
                errno_ = errno.EBUSY
            raise CallError(f'Failed to delete dataset: {error}', errno_)

    @accepts(Str('name'), Dict('options', Bool('recursive', default=False)))
    def mount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                if options['recursive']:
                    dataset.mount_recursive()
                else:
                    dataset.mount()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to mount dataset', exc_info=True)
            raise CallError(f'Failed to mount dataset: {e}')

    @accepts(Str('name'), Dict('options', Bool('force', default=False)))
    def umount(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.umount(force=options['force'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to umount dataset', exc_info=True)
            raise CallError(f'Failed to umount dataset: {e}')

    @accepts(
        Str('dataset'),
        Dict(
            'options',
            Str('new_name', required=True, empty=False),
            Bool('recursive', default=False)
        )
    )
    def rename(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.rename(options['new_name'], recursive=options['recursive'])
        except libzfs.ZFSException as e:
            self.logger.error('Failed to rename dataset', exc_info=True)
            raise CallError(f'Failed to rename dataset: {e}')

    def promote(self, name):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                dataset.promote()
        except libzfs.ZFSException as e:
            self.logger.error('Failed to promote dataset', exc_info=True)
            raise CallError(f'Failed to promote dataset: {e}')

    def inherit(self, name, prop, recursive=False):
        try:
            with libzfs.ZFS() as zfs:
                dataset = zfs.get_dataset(name)
                zprop = dataset.properties.get(prop)
                if not zprop:
                    raise CallError(f'Property {prop!r} not found.', errno.ENOENT)
                zprop.inherit(recursive=recursive)
        except libzfs.ZFSException as e:
            raise CallError(str(e))
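
To make the `extra` options described in the `query` docstring concrete, here is a minimal sketch as it might look from inside another middleware service; the surrounding service context and the pool/dataset name are assumptions of the sketch.

# Hypothetical usage sketch from within another middleware service method.
async def example(self):
    # Hierarchical listing with a restricted property set and no user properties.
    trees = await self.middleware.call('zfs.dataset.query', [], {
        'extra': {
            'flat': False,
            'properties': ['used', 'available', 'mountpoint'],
            'user_properties': False,
        },
    })

    # A lone ('id', '=', ...) filter takes the fast single-dataset path above.
    one = await self.middleware.call(
        'zfs.dataset.query', [('id', '=', 'tank/mydataset')], {'get': True}
    )
    return trees, one
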
Example #5
class MailService(ConfigService):

    class Config:
        datastore = 'system.email'
        datastore_prefix = 'em_'
        datastore_extend = 'mail.mail_extend'

    @private
    async def mail_extend(self, cfg):
        if cfg['security']:
            cfg['security'] = cfg['security'].upper()
        return cfg

    @accepts(Dict(
        'mail_update',
        Str('fromemail', validators=[Email()]),
        Str('fromname'),
        Str('outgoingserver'),
        Int('port'),
        Str('security', enum=['PLAIN', 'SSL', 'TLS']),
        Bool('smtp'),
        Str('user'),
        Str('pass', private=True),
        register=True,
        update=True,
    ))
    async def do_update(self, data):
        """
        Update Mail Service Configuration.

        `fromemail` is used as a sending address which the mail server will use for sending emails.

        `outgoingserver` is the hostname or IP address of SMTP server used for sending an email.

        `security` is type of encryption desired.

        `smtp` is a boolean value which, when set, indicates that SMTP authentication is enabled and `user`/`pass`
        become required attributes.
        """
        config = await self.config()

        new = config.copy()
        new.update(data)
        new['security'] = new['security'].lower()  # Django Model compatibility

        verrors = ValidationErrors()

        if new['smtp'] and new['user'] == '':
            verrors.add(
                'mail_update.user',
                'This field is required when SMTP authentication is enabled',
            )

        self.__password_verify(new['pass'], 'mail_update.pass', verrors)

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', 'system.email', config['id'], new, {'prefix': 'em_'})
        return await self.config()

    def __password_verify(self, password, schema, verrors=None):
        if not password:
            return
        if verrors is None:
            verrors = ValidationErrors()
        # FIXME: smtplib does not support non-ascii password yet
        # https://github.com/python/cpython/pull/8938
        try:
            password.encode('ascii')
        except UnicodeEncodeError:
            verrors.add(
                schema,
                'Only plain text characters (7-bit ASCII) are allowed in passwords. '
                'UTF or composed characters are not allowed.'
            )
        return verrors

    @accepts(Dict(
        'mail_message',
        Str('subject', required=True),
        Str('text', required=True, max_length=None),
        Str('html', null=True, max_length=None),
        List('to', items=[Str('email')]),
        List('cc', items=[Str('email')]),
        Int('interval', null=True),
        Str('channel', null=True),
        Int('timeout', default=300),
        Bool('attachments', default=False),
        Bool('queue', default=True),
        Dict('extra_headers', additional_attrs=True),
        register=True,
    ), Ref('mail_update'))
    @job(pipes=['input'], check_pipes=False)
    def send(self, job, message, config):
        """
        Sends mail using configured mail settings.

        `text` will be formatted to HTML using Markdown and rendered with the default E-Mail template.
        You can supply your own HTML using `html`. If `html` is null, no HTML MIME part will be added to the E-Mail.

        If `attachments` is true, a list composed of dicts with the following structure is required
        via HTTP upload:
          - headers(list)
            - name(str)
            - value(str)
            - params(dict)
          - content (str)

        [
         {
          "headers": [
           {
            "name": "Content-Transfer-Encoding",
            "value": "base64"
           },
           {
            "name": "Content-Type",
            "value": "application/octet-stream",
            "params": {
             "name": "test.txt"
            }
           }
          ],
          "content": "dGVzdAo="
         }
        ]
        """

        product_name = self.middleware.call_sync('system.product_name')

        gc = self.middleware.call_sync('datastore.config', 'network.globalconfiguration')

        hostname = f'{gc["gc_hostname"]}.{gc["gc_domain"]}'

        message['subject'] = f'{product_name} {hostname}: {message["subject"]}'

        if 'html' in message and message['html'] is None:
            message.pop('html')
        elif 'html' not in message:
            lookup = TemplateLookup(
                directories=[os.path.join(os.path.dirname(os.path.realpath(__file__)), '../assets/templates')],
                module_directory="/tmp/mako/templates")

            tmpl = lookup.get_template('mail.html')

            message['html'] = tmpl.render(body=html.escape(message['text']).replace('\n', '<br>\n'))

        return self.send_raw(job, message, config)

    @accepts(Ref('mail_message'), Ref('mail_update'))
    @job(pipes=['input'], check_pipes=False)
    @private
    def send_raw(self, job, message, config):
        config = dict(self.middleware.call_sync('mail.config'), **config)

        if config['fromname']:
            from_addr = Header(config['fromname'], 'utf-8')
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr.append(f'<{config["fromemail"]}>', 'utf-8')
            else:
                from_addr.append(f'<{config["fromemail"]}>', 'ascii')
        else:
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr = Header(config['fromemail'], 'utf-8')
            else:
                from_addr = Header(config['fromemail'], 'ascii')

        interval = message.get('interval')
        if interval is None:
            interval = timedelta()
        else:
            interval = timedelta(seconds=interval)

        sw_name = self.middleware.call_sync('system.info')['version'].split('-', 1)[0]

        channel = message.get('channel')
        if not channel:
            channel = sw_name.lower()
        if interval > timedelta():
            channelfile = '/tmp/.msg.%s' % (channel)
            last_update = datetime.now() - interval
            try:
                last_update = datetime.fromtimestamp(os.stat(channelfile).st_mtime)
            except OSError:
                pass
            timediff = datetime.now() - last_update
            if (timediff >= interval) or (timediff < timedelta()):
                # Make sure mtime is modified
                # We could use os.utime but this is simpler!
                with open(channelfile, 'w') as f:
                    f.write('!')
            else:
                raise CallError('This message was already sent in the given interval')

        verrors = self.__password_verify(config['pass'], 'mail-config.pass')
        if verrors:
            raise verrors
        to = message.get('to')
        if not to:
            to = [
                self.middleware.call_sync(
                    'user.query', [('username', '=', 'root')], {'get': True}
                )['email']
            ]
            if not to[0]:
                raise CallError('Email address for root is not configured')

        if message.get('attachments'):
            job.check_pipe("input")

            def read_json():
                f = job.pipes.input.r
                data = b''
                i = 0
                while True:
                    read = f.read(1048576)  # 1MiB
                    if read == b'':
                        break
                    data += read
                    i += 1
                    if i > 50:
                        raise ValueError('Attachments bigger than 50MB not allowed yet')
                if data == b'':
                    return None
                return json.loads(data)

            attachments = read_json()
        else:
            attachments = None

        if 'html' in message or attachments:
            msg = MIMEMultipart()
            msg.preamble = message['text']
            if 'html' in message:
                msg2 = MIMEMultipart('alternative')
                msg2.attach(MIMEText(message['text'], 'plain', _charset='utf-8'))
                msg2.attach(MIMEText(message['html'], 'html', _charset='utf-8'))
                msg.attach(msg2)
            if attachments:
                for attachment in attachments:
                    m = Message()
                    m.set_payload(attachment['content'])
                    for header in attachment.get('headers'):
                        m.add_header(header['name'], header['value'], **(header.get('params') or {}))
                    msg.attach(m)
        else:
            msg = MIMEText(message['text'], _charset='utf-8')

        msg['Subject'] = message['subject']

        msg['From'] = from_addr
        msg['To'] = ', '.join(to)
        if message.get('cc'):
            msg['Cc'] = ', '.join(message.get('cc'))
        msg['Date'] = formatdate()

        local_hostname = socket.gethostname()

        msg['Message-ID'] = "<%s-%s.%s@%s>" % (sw_name.lower(), datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"), base64.urlsafe_b64encode(os.urandom(3)), local_hostname)

        extra_headers = message.get('extra_headers') or {}
        for key, val in list(extra_headers.items()):
            # We already have "Content-Type: multipart/mixed" and setting "Content-Type: text/plain" like some scripts
            # do will break the Python email module.
            if key.lower() == "content-type":
                continue

            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val

        syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
        try:
            server = self._get_smtp_server(config, message['timeout'], local_hostname=local_hostname)
            # NOTE: Don't do this.
            #
            # If smtplib.SMTP* tells you to run connect() first, it's because the
            # mailserver it tried connecting to via the outgoing server argument
            # was unreachable and it tried to connect to 'localhost' and barfed.
            # This is because FreeNAS doesn't run a full MTA.
            # else:
            #    server.connect()
            headers = '\n'.join([f'{k}: {v}' for k, v in msg._headers])
            syslog.syslog(f"sending mail to {', '.join(to)}\n{headers}")
            server.sendmail(from_addr.encode(), to, msg.as_string())
            server.quit()
        except Exception as e:
            # Don't spam syslog with these messages. They should only end up in the
            # test-email pane.
            # We are only interested in ValueError, not subclasses.
            if e.__class__ is ValueError:
                raise CallError(str(e))
            syslog.syslog(f'Failed to send email to {", ".join(to)}: {str(e)}')
            if isinstance(e, smtplib.SMTPAuthenticationError):
                raise CallError(f'Authentication error ({e.smtp_code}): {e.smtp_error}', errno.EAUTH)
            self.logger.warning('Failed to send email: %s', str(e), exc_info=True)
            if message['queue']:
                with MailQueue() as mq:
                    mq.append(msg)
            raise CallError(f'Failed to send email: {e}')
        return True

    def _get_smtp_server(self, config, timeout=300, local_hostname=None):
        if local_hostname is None:
            local_hostname = socket.gethostname()

        if not config['outgoingserver'] or not config['port']:
            # See NOTE below.
            raise ValueError('you must provide an outgoing mailserver and mail'
                             ' server port when sending mail')
        if config['security'] == 'SSL':
            server = smtplib.SMTP_SSL(
                config['outgoingserver'],
                config['port'],
                timeout=timeout,
                local_hostname=local_hostname)
        else:
            server = smtplib.SMTP(
                config['outgoingserver'],
                config['port'],
                timeout=timeout,
                local_hostname=local_hostname)
            if config['security'] == 'TLS':
                server.starttls()
        if config['smtp']:
            server.login(config['user'], config['pass'])
        return server

    @periodic(600, run_on_start=False)
    @private
    def send_mail_queue(self):

        with MailQueue() as mq:
            for queue in list(mq.queue):
                try:
                    config = self.middleware.call_sync('mail.config')
                    server = self._get_smtp_server(config)
                    server.sendmail(queue.message['From'].encode(), queue.message['To'].split(', '), queue.message.as_string())
                    server.quit()
                except Exception:
                    self.logger.debug('Sending message from queue failed', exc_info=True)
                    queue.attempts += 1
                    if queue.attempts >= mq.MAX_ATTEMPTS:
                        mq.queue.remove(queue)
                else:
                    mq.queue.remove(queue)
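
A minimal sketch of driving this service from a client, assuming a working SMTP setup, the `middlewared.client.Client` API, and a `job=True` keyword on the client's `call` that waits for the send job to complete; values shown are placeholders.

# Hypothetical usage sketch: configure mail settings, then send a test message.
from middlewared.client import Client

with Client() as c:
    c.call('mail.update', {
        'fromemail': 'nas@example.com',
        'outgoingserver': 'smtp.example.com',
        'port': 587,
        'security': 'TLS',
        'smtp': True,
        'user': 'nas@example.com',
        'pass': 'secret',
    })
    c.call('mail.send', {
        'subject': 'Test message',
        'text': 'It works.',
        'to': ['admin@example.com'],
    }, job=True)
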
Example #6
File: ups.py  Project: mike0615/freenas
class UPSService(SystemServiceService):
    DRIVERS_AVAILABLE = set(os.listdir(DRIVER_BIN_DIR))

    class Config:
        datastore = 'services.ups'
        datastore_prefix = 'ups_'
        datastore_extend = 'ups.ups_config_extend'
        service = 'ups'
        service_verb = 'restart'

    @private
    async def ups_config_extend(self, data):
        data['mode'] = data['mode'].upper()
        data['shutdown'] = data['shutdown'].upper()
        data['toemail'] = [v for v in data['toemail'].split(';') if v]
        return data

    @accepts()
    async def port_choices(self):
        ports = [x for x in glob.glob('/dev/cua*') if x.find('.') == -1]
        ports.extend(glob.glob('/dev/ugen*'))
        ports.extend(glob.glob('/dev/uhid*'))
        return ports

    @accepts()
    def driver_choices(self):
        """
        Returns choices of UPS drivers supported by the system.
        """
        ups_choices = {}
        if os.path.exists("/conf/base/etc/local/nut/driver.list"):
            with open('/conf/base/etc/local/nut/driver.list', 'rb') as f:
                d = f.read().decode('utf8', 'ignore')
            r = io.StringIO()
            for line in re.sub(r'[ \t]+', ' ', d, flags=re.M).split('\n'):
                r.write(line.strip() + '\n')
            r.seek(0)
            reader = csv.reader(r, delimiter=' ', quotechar='"')
            for row in reader:
                if len(row) == 0 or row[0].startswith('#'):
                    continue
                if row[-2] == '#':
                    last = -3
                else:
                    last = -1
                driver_str = row[last]
                driver_annotation = ''
                m = re.match(r'(.+) \((.+)\)',
                             driver_str)  # "blazer_usb (USB ID 0665:5161)"
                if m:
                    driver_str, driver_annotation = m.group(1), m.group(2)
                for driver in driver_str.split(
                        ' or '):  # can be "blazer_ser or blazer_usb"
                    driver = driver.strip()
                    if driver not in self.DRIVERS_AVAILABLE:
                        continue
                    for i, field in enumerate(list(row)):
                        row[i] = field
                    key = '$'.join([driver, row[3]])
                    ups_choices[key] = '%s (%s)' % (
                        ' '.join(filter(None, row[0:last])),
                        ', '.join(filter(None, [driver, driver_annotation])),
                    )
        return ups_choices

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        driver = data.get('driver')
        if driver:
            if driver not in (
                    await self.middleware.call('ups.driver_choices')).keys():
                verrors.add(
                    f'{schema}.driver',
                    'Driver selected does not match local machine\'s driver list'
                )

        identifier = data['identifier']
        if identifier:
            if not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
                verrors.add(f'{schema}.identifier',
                            'Use alphanumeric characters, ".", "-" and "_"')

        for field in [
                field for field in ['monpwd', 'monuser'] if data.get(field)
        ]:
            if re.search(r'[ #]', data[field], re.I):
                verrors.add(f'{schema}.{field}',
                            'Spaces or number signs are not allowed')

        mode = data.get('mode')
        if mode == 'MASTER':
            for field in filter(lambda f: not data[f], ['port', 'driver']):
                verrors.add(f'{schema}.{field}', 'This field is required')
        else:
            if not data.get('remotehost'):
                verrors.add(f'{schema}.remotehost', 'This field is required')

        to_emails = data.get('toemail')
        if to_emails:
            data['toemail'] = ';'.join(to_emails)
        else:
            data['toemail'] = ''

        data['mode'] = data['mode'].lower()
        data['shutdown'] = data['shutdown'].lower()

        return verrors, data

    @accepts(
        Dict('ups_update',
             Bool('emailnotify'),
             Bool('powerdown'),
             Bool('rmonitor'),
             Int('nocommwarntime', null=True),
             Int('remoteport'),
             Int('shutdowntimer'),
             Int('hostsync', validators=[Range(min=0)]),
             Str('description'),
             Str('driver'),
             Str('extrausers', max_length=None),
             Str('identifier', empty=False),
             Str('mode', enum=['MASTER', 'SLAVE']),
             Str('monpwd', empty=False),
             Str('monuser', empty=False),
             Str('options', max_length=None),
             Str('optionsupsd', max_length=None),
             Str('port'),
             Str('remotehost'),
             Str('shutdown', enum=['LOWBATT', 'BATT']),
             Str('shutdowncmd', empty=False),
             Str('subject'),
             List('toemail', items=[Str('email', validators=[Email()])]),
             update=True))
    async def do_update(self, data):
        """
        Update UPS Service Configuration.

        `emailnotify` when enabled, sends out notifications of different UPS events via email.

        `powerdown` when enabled, sets UPS to power off after shutting down the system.

        `nocommwarntime` is a value in seconds specifying how long the UPS Service waits before alerting that it
        cannot reach the configured UPS.

        `shutdowntimer` is a value in seconds specifying how long the Service waits for the UPS before initiating
        a shutdown. This only applies when `shutdown` is set to "BATT".

        `shutdowncmd` is the command which is executed to initiate a shutdown. It defaults to "poweroff".

        `toemail` is a list of valid email addresses to which notification emails are sent.
        """
        config = await self.config()
        old_config = config.copy()
        config.update(data)
        verrors, config = await self.validate_data(config, 'ups_update')
        if verrors:
            raise verrors

        old_config['mode'] = old_config['mode'].lower()
        old_config['shutdown'] = old_config['shutdown'].lower()
        old_config['toemail'] = ';'.join(
            old_config['toemail']) if old_config['toemail'] else ''

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            if config['identifier'] != old_config['identifier']:
                await self.dismiss_alerts()

            await self._update_service(old_config, config)

        return await self.config()

    @private
    async def alerts_mapping(self):
        return {
            'LOWBATT': 'UPSBatteryLow',
            'COMMBAD': 'UPSCommbad',
            'COMMOK': 'UPSCommok',
            'ONBATT': 'UPSOnBattery',
            'ONLINE': 'UPSOnline',
            'REPLBATT': 'UPSReplbatt'
        }

    @private
    async def dismiss_alerts(self):
        config = await self.config()

        for alert in (await self.alerts_mapping()).values():
            await self.middleware.call('alert.oneshot_delete', alert,
                                       {'ups': config['identifier']})

    @private
    @accepts(Str('notify_type'))
    async def upssched_event(self, notify_type):
        config = await self.config()

        if config['mode'] == 'MASTER':
            upsc_identifier = f'{config["identifier"]}@localhost:{config["remoteport"]}'
        else:
            upsc_identifier = f'{config["identifier"]}@{config["remotehost"]}:{config["remoteport"]}'

        if notify_type.lower() == 'shutdown':
            # Before we start FSD with upsmon, let's ensure that the UPS is not ONLINE (OL).
            # There are cases where battery/charger issues can result in ups.status being "OL LB" at the
            # same time. This ensures that we don't initiate a shutdown while the UPS is OL.
            stats_output = (await run('/usr/local/bin/upsc',
                                      upsc_identifier,
                                      check=False)).stdout

            ups_status = re.findall(
                fr'ups.status: (.*)',
                '' if not stats_output else stats_output.decode())
            if ups_status and 'ol' in ups_status[0].lower():
                self.middleware.logger.debug(
                    f'Shutdown not initiated as ups.status ({ups_status[0]}) indicates '
                    f'{config["identifier"]} is ONLINE (OL).')
            else:
                syslog.syslog(syslog.LOG_NOTICE,
                              'upssched-cmd "issuing shutdown"')
                await run('/usr/local/sbin/upsmon', '-c', 'fsd', check=False)
        elif 'notify' in notify_type.lower():
            # notify_type is expected to be of the following format:
            # NOTIFY-EVENT, e.g. NOTIFY-LOWBATT
            notify_type = notify_type.split('-')[-1]

            # We would like to send alerts for the following events
            alert_mapping = await self.alerts_mapping()

            await self.dismiss_alerts()

            if notify_type in alert_mapping:
                await self.middleware.call('alert.oneshot_create',
                                           alert_mapping[notify_type],
                                           {'ups': config['identifier']})

            if config['emailnotify']:
                # Email user with the notification event and details
                # We send the email in the following format ( inclusive line breaks )

                # NOTIFICATION: 'LOWBATT'
                # UPS: 'ups'
                #
                # Statistics recovered:
                #
                # 1) Battery charge (percent)
                # battery.charge: 5
                #
                # 2) Remaining battery level when UPS switches to LB (percent)
                # battery.charge.low: 10
                #
                # 3) Battery runtime (seconds)
                # battery.runtime: 1860
                #
                # 4) Remaining battery runtime when UPS switches to LB (seconds)
                # battery.runtime.low: 900

                ups_name = config['identifier']
                hostname = (await
                            self.middleware.call('system.info'))['hostname']
                current_time = datetime.datetime.now(tz=dateutil.tz.tzlocal(
                )).strftime('%a %b %d %H:%M:%S %Z %Y')
                ups_subject = config['subject'].replace('%d',
                                                        current_time).replace(
                                                            '%h', hostname)
                body = f'NOTIFICATION: {notify_type!r}<br>UPS: {ups_name!r}<br><br>'

                # Let's gather following stats
                data_points = {
                    'battery.charge':
                    'Battery charge (percent)',
                    'battery.charge.low':
                    'Battery level remaining (percent) when UPS switches to Low Battery (LB)',
                    'battery.charge.status':
                    'Battery charge status',
                    'battery.runtime':
                    'Battery runtime (seconds)',
                    'battery.runtime.low':
                    'Battery runtime remaining (seconds) when UPS switches to Low Battery (LB)',
                    'battery.runtime.restart':
                    'Minimum battery runtime (seconds) to allow UPS restart after power-off',
                }

                stats_output = (await run('/usr/local/bin/upsc',
                                          upsc_identifier,
                                          check=False)).stdout
                recovered_stats = re.findall(
                    fr'({"|".join(data_points)}): (.*)',
                    '' if not stats_output else stats_output.decode())

                if recovered_stats:
                    body += 'Statistics recovered:<br><br>'
                    # recovered_stats is expected to be a list in this format
                    # [('battery.charge', '5'), ('battery.charge.low', '10'), ('battery.runtime', '1860')]
                    for index, stat in enumerate(recovered_stats):
                        body += f'{index + 1}) {data_points[stat[0]]}<br>  {stat[0]}: {stat[1]}<br><br>'

                else:
                    body += 'Statistics could not be recovered<br>'

                # Subject and body defined, send email
                job = await self.middleware.call('mail.send', {
                    'subject': ups_subject,
                    'text': body,
                    'to': config['toemail']
                })

                await job.wait()
                if job.error:
                    self.middleware.logger.debug(
                        f'Failed to send UPS status email: {job.error}')

        else:
            self.middleware.logger.debug(
                f'Unrecognized UPS notification event: {notify_type}')
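For reference, here is a minimal standalone sketch (not the middleware itself) of the payload normalization performed by validate_data above: the identifier is checked against the same character restriction, `toemail` is joined with ';', and `mode`/`shutdown` are lowercased before the result is compared with the stored configuration. The function name and the sample values are illustrative only.

import re


def normalize_ups_payload(data):
    """Mirror the field normalization performed by validate_data above."""
    out = dict(data)
    identifier = out.get('identifier', '')
    # Same character restriction that validate_data enforces for the identifier.
    if identifier and not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
        raise ValueError('Use alphanumeric characters, ".", "-" and "_"')
    out['toemail'] = ';'.join(out.get('toemail') or [])
    out['mode'] = out['mode'].lower()
    out['shutdown'] = out['shutdown'].lower()
    return out


# Example payload in the same shape that do_update accepts.
print(normalize_ups_payload({
    'identifier': 'ups',
    'mode': 'MASTER',
    'shutdown': 'BATT',
    'toemail': ['admin@example.com'],
}))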
Example #7
class KerberosRealmService(CRUDService):
    class Config:
        datastore = 'directoryservice.kerberosrealm'
        datastore_prefix = 'krb_'
        datastore_extend = 'kerberos.realm.kerberos_extend'
        namespace = 'kerberos.realm'

    @private
    async def kerberos_extend(self, data):
        for param in ['kdc', 'admin_server', 'kpasswd_server']:
            data[param] = data[param].split(' ') if data[param] else []

        return data

    @private
    async def kerberos_compress(self, data):
        for param in ['kdc', 'admin_server', 'kpasswd_server']:
            data[param] = ' '.join(data[param])

        return data

    @accepts(
        Dict('kerberos_realm_create',
             Str('realm', required=True),
             List('kdc', default=[]),
             List('admin_server', default=[]),
             List('kpasswd_server', default=[]),
             register=True))
    async def do_create(self, data):
        """
        Create a new kerberos realm. This will be automatically populated during the
        domain join process in an Active Directory environment. Kerberos realm names
        are case-sensitive, but the convention is to use upper-case only.

        Entries for kdc, admin_server, and kpasswd_server are not required.
        If they are unpopulated, then kerberos will use DNS SRV records to
        discover the correct servers. The option to hard-code them is provided
        because of AD site discovery: Kerberos has no concept of Active Directory
        sites, so the middleware performs the site discovery and sets the
        kerberos configuration based on the AD site.
        """
        verrors = ValidationErrors()

        verrors.add_child('kerberos_realm_create', await self._validate(data))

        if verrors:
            raise verrors

        data = await self.kerberos_compress(data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        await self.middleware.call('etc.generate', 'kerberos')
        await self.middleware.call('service.restart', 'cron')
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("kerberos_realm_create", "kerberos_realm_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update a kerberos realm by id. This will be automatically populated during the
        domain join process in an Active Directory environment. Kerberos realm names
        are case-sensitive, but convention is to only use upper-case.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        data = await self.kerberos_compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('etc.generate', 'kerberos')
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a kerberos realm by ID.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
        await self.middleware.call('etc.generate', 'kerberos')

    @private
    async def _validate(self, data):
        verrors = ValidationErrors()
        realms = await self.query()
        for realm in realms:
            if realm['realm'].upper() == data['realm'].upper():
                verrors.add(
                    'kerberos_realm',
                    f'Kerberos realm with name {realm["realm"]} already exists.'
                )
        return verrors
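For reference, a standalone sketch of the compress/extend round-trip used above to move the kdc, admin_server, and kpasswd_server lists in and out of the space-separated string format stored in the datastore. The helper names and the sample realm are illustrative only.

FIELDS = ('kdc', 'admin_server', 'kpasswd_server')


def compress(data):
    """List fields -> space-separated strings, as stored in the datastore."""
    return {**data, **{f: ' '.join(data[f]) for f in FIELDS}}


def extend(data):
    """Space-separated strings -> lists, mirroring kerberos_extend above."""
    return {**data, **{f: data[f].split(' ') if data[f] else [] for f in FIELDS}}


realm = {
    'realm': 'EXAMPLE.COM',
    'kdc': ['kdc1.example.com', 'kdc2.example.com'],
    'admin_server': [],
    'kpasswd_server': [],
}
assert extend(compress(realm)) == realm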
Example #8
class SMBService(SystemServiceService):
    class Config:
        service = 'cifs'
        service_verb = 'restart'
        datastore = 'services.cifs'
        datastore_extend = 'smb.smb_extend'
        datastore_prefix = 'cifs_srv_'

    @private
    async def smb_extend(self, smb):
        """Extend smb for netbios."""
        smb['netbiosname_local'] = smb['netbiosname']
        if not await self.middleware.call('system.is_freenas'
                                          ) and await self.middleware.call(
                                              'failover.node') == 'B':
            smb['netbiosname_local'] = smb['netbiosname_b']

        smb['netbiosalias'] = (smb['netbiosalias'] or '').split()

        smb['loglevel'] = LOGLEVEL_MAP.get(smb['loglevel'])

        smb.pop('secrets')

        return smb

    async def __validate_netbios_name(self, name):
        return RE_NETBIOSNAME.match(name)

    async def unixcharset_choices(self):
        return await self.generate_choices([
            'UTF-8', 'ISO-8859-1', 'ISO-8859-15', 'GB2312', 'EUC-JP', 'ASCII'
        ])

    @private
    async def generate_choices(self, initial):
        def key_cp(encoding):
            cp = re.compile(
                r"(?P<name>CP|GB|ISO-8859-|UTF-)(?P<num>\d+)").match(encoding)
            if cp:
                return tuple((cp.group('name'), int(cp.group('num'), 10)))
            else:
                return tuple((encoding, float('inf')))

        charset = await self.common_charset_choices()
        return {
            v: v
            for v in
            [c for c in sorted(charset, key=key_cp) if c not in initial] +
            initial
        }

    @accepts()
    async def bindip_choices(self):
        """
        List of valid choices for IP addresses to which to bind the SMB service.
        Addresses assigned by DHCP are excluded from the results.
        """
        choices = {}
        for i in await self.middleware.call('interface.ip_in_use'):
            choices[i['address']] = i['address']
        return choices

    @accepts()
    async def domain_choices(self):
        """
        List of domains visible to winbindd. Returns empty list if winbindd is
        stopped.
        """
        domains = []
        wb = await run([SMBCmd.WBINFO.value, '-m'], check=False)
        if wb.returncode == 0:
            domains = wb.stdout.decode().splitlines()

        return domains

    @private
    async def common_charset_choices(self):
        def check_codec(encoding):
            try:
                return encoding.upper() if codecs.lookup(encoding) else False
            except LookupError:
                return False

        proc = await Popen(['/usr/bin/iconv', '-l'],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
        output = (await proc.communicate())[0].decode()

        encodings = set()
        for line in output.splitlines():
            enc = [e for e in line.split() if check_codec(e)]

            if enc:
                cp = enc[0]
                for e in enc:
                    if e in ('UTF-8', 'ASCII', 'GB2312', 'HZ-GB-2312',
                             'CP1361'):
                        cp = e
                        break

                encodings.add(cp)

        return encodings

    @private
    async def store_ldap_admin_password(self):
        """
        This is required if the LDAP directory service is enabled. The ldap admin dn and
        password are stored in private/secrets.tdb file.
        """
        ldap = await self.middleware.call('datastore.config',
                                          'directoryservice.ldap')
        if not ldap['ldap_enable']:
            return True

        set_pass = await run(
            ['/usr/local/bin/smbpasswd', '-w', ldap['ldap_bindpw']],
            check=False)
        if set_pass.returncode != 0:
            self.logger.debug(
                f"Failed to set ldap bindpw in secrets.tdb: {set_pass.stdout.decode()}"
            )
            return False

        return True

    @private
    def getparm(self, parm, section):
        """
        Get a parameter from the smb4.conf file. This is more reliable than
        'testparm --parameter-name'. testparm will fail in a variety of
        conditions without returning the parameter's value.
        """
        try:
            if section.upper() == 'GLOBAL':
                try:
                    LP_CTX.load(SMBPath.GLOBALCONF.platform())
                except Exception as e:
                    self.logger.warning("Failed to reload smb.conf: %s", e)

                return LP_CTX.get(parm)
            else:
                return self.middleware.call_sync('sharing.smb.reg_getparm',
                                                 section, parm)

        except Exception as e:
            raise CallError(
                f'Attempt to query smb4.conf parameter [{parm}] failed with error: {e}'
            )

    @private
    async def get_next_rid(self):
        next_rid = (await self.config())['next_rid']
        if next_rid == 0:
            try:
                private_dir = await self.middleware.call(
                    "smb.getparm", "private directory", "GLOBAL")
                next_rid = passdb.PDB(
                    f"tdbsam:{private_dir}/passdb.tdb").new_rid()
            except Exception:
                self.logger.warning(
                    "Failed to initialize RID counter from passdb. "
                    "Using default value for initialization.",
                    exc_info=True)
                next_rid = 5000

        await self.middleware.call('datastore.update', 'services.cifs', 1,
                                   {'next_rid': next_rid + 1},
                                   {'prefix': 'cifs_srv_'})
        return next_rid

    @private
    async def setup_directories(self):
        for p in SMBPath:
            if p == SMBPath.STATEDIR:
                path = await self.middleware.call("smb.getparm",
                                                  "state directory", "global")
            elif p == SMBPath.PRIVATEDIR:
                path = await self.middleware.call("smb.getparm", "privatedir",
                                                  "global")
            else:
                path = p.platform()

            try:
                if not await self.middleware.call('filesystem.acl_is_trivial',
                                                  path):
                    self.logger.warning(
                        "Inappropriate ACL detected on path [%s] stripping ACL",
                        path)
                    stripacl = await run(['setfacl', '-b', path], check=False)
                    if stripacl.returncode != 0:
                        self.logger.warning(
                            "Failed to strip ACL from path %s: %s", path,
                            stripacl.stderr.decode())
            except CallError:
                # Currently the only time a CallError is raised here is on ENOENT, which may be expected
                pass

            if not os.path.exists(path):
                if p.is_dir():
                    os.mkdir(path, p.mode())
            else:
                os.chmod(path, p.mode())

    @private
    async def import_conf_to_registry(self):
        drop = await run([SMBCmd.NET.value, 'conf', 'drop'], check=False)
        if drop.returncode != 0:
            self.logger.warning('failed to drop existing share config: %s',
                                drop.stderr.decode())
        load = await run(
            [SMBCmd.NET.value, 'conf', 'import',
             SMBPath.SHARECONF.platform()],
            check=False)
        if load.returncode != 0:
            self.logger.warning('failed to load share config: %s',
                                load.stderr.decode())

    @private
    @job(lock="smb_configure")
    async def configure(self, job):
        await self.reset_smb_ha_mode()
        job.set_progress(0, 'Preparing to configure SMB.')
        data = await self.config()
        job.set_progress(10, 'Generating SMB config.')
        await self.middleware.call('etc.generate', 'smb')

        # The following hack will be removed once we make our own samba package
        if osc.IS_LINUX:
            os.remove("/etc/samba/smb.conf")
            os.symlink("/etc/smb4.conf", "/etc/samba/smb.conf")
        """
        Many samba-related tools will fail if they are unable to initialize
        a messaging context, which will happen if the samba-related directories
        do not exist or have incorrect permissions.
        """
        job.set_progress(20, 'Setting up SMB directories.')
        await self.setup_directories()
        job.set_progress(30, 'Setting up server SID.')
        await self.middleware.call('smb.set_sid', data['cifs_SID'])
        """
        If the ldap passdb backend is being used, then the remote LDAP server
        will provide the SMB users and groups. We skip these steps to avoid having
        samba potentially try to write our local users and groups to the remote
        LDAP server.
        """
        if await self.middleware.call("smb.getparm", "passdb backend",
                                      "global") == "tdbsam":
            job.set_progress(40, 'Synchronizing passdb and groupmap.')
            await self.middleware.call('etc.generate', 'user')
            pdb_job = await self.middleware.call("smb.synchronize_passdb")
            grp_job = await self.middleware.call(
                "smb.synchronize_group_mappings")
            await pdb_job.wait()
            await grp_job.wait()
            await self.middleware.call("admonitor.start")
        """
        The following steps ensure that we cleanly import our SMB shares
        into the registry.
        """
        job.set_progress(60, 'Generating SMB share configuration.')
        await self.middleware.call('cache.put', 'SMB_REG_INITIALIZED', False)
        await self.middleware.call("etc.generate", "smb_share")
        await self.middleware.call("smb.import_conf_to_registry")
        await self.middleware.call('cache.put', 'SMB_REG_INITIALIZED', True)
        os.unlink(SMBPath.SHARECONF.platform())
        """
        It is possible that system dataset was migrated or an upgrade
        wiped our secrets.tdb file. Re-import directory service secrets
        if they are missing from the current running configuration.
        """
        job.set_progress(65, 'Initializing directory services')
        await self.middleware.call("directoryservices.initialize")

        job.set_progress(70, 'Checking SMB server status.')
        if await self.middleware.call("service.started", "cifs"):
            job.set_progress(80, 'Restarting SMB service.')
            await self.middleware.call("service.restart", "cifs")
        job.set_progress(100, 'Finished configuring SMB.')

    @private
    async def get_smb_ha_mode(self):
        if await self.middleware.call('cache.has_key', 'SMB_HA_MODE'):
            return await self.middleware.call('cache.get', 'SMB_HA_MODE')

        if not await self.middleware.call('system.is_freenas'
                                          ) and await self.middleware.call(
                                              'failover.licensed'):
            system_dataset = await self.middleware.call('systemdataset.config')
            if system_dataset['pool'] != await self.middleware.call(
                    'boot.pool_name'):
                hamode = SMBHAMODE['UNIFIED'].name
            else:
                hamode = SMBHAMODE['LEGACY'].name
        else:
            hamode = SMBHAMODE['STANDALONE'].name

        await self.middleware.call('cache.put', 'SMB_HA_MODE', hamode)
        return hamode

    @private
    async def reset_smb_ha_mode(self):
        await self.middleware.call('cache.pop', 'SMB_HA_MODE')
        return await self.get_smb_ha_mode()

    @private
    async def validate_smb(self, new, verrors):
        try:
            await self.middleware.call('sharing.smb.validate_aux_params',
                                       new['smb_options'],
                                       'smb_update.smb_options')
        except ValidationErrors as errs:
            verrors.add_child('smb_update.smb_options', errs)

        if new.get('unixcharset') and new[
                'unixcharset'] not in await self.unixcharset_choices():
            verrors.add('smb_update.unixcharset',
                        'Please provide a valid value for unixcharset')

        for i in ('workgroup', 'netbiosname', 'netbiosname_b', 'netbiosalias'):
            """
            There are two cases where NetBIOS names must be rejected:
            1. They contain invalid characters for NetBIOS protocol
            2. The name is identical to the NetBIOS workgroup.
            """
            if not new.get(i):
                continue

            if i == 'netbiosalias':
                for idx, item in enumerate(new[i]):
                    if not await self.__validate_netbios_name(item):
                        verrors.add(f'smb_update.{i}.{idx}',
                                    f'Invalid NetBIOS name: {item}')
                    if item.casefold() == new['workgroup'].casefold():
                        verrors.add(
                            f'smb_update.{i}.{idx}',
                            f'NetBIOS alias [{item}] conflicts with workgroup name.'
                        )
            else:
                if not await self.__validate_netbios_name(new[i]):
                    verrors.add(f'smb_update.{i}',
                                f'Invalid NetBIOS name: {new[i]}')

                if i != 'workgroup' and new[i].casefold(
                ) == new['workgroup'].casefold():
                    verrors.add(
                        f'smb_update.{i}',
                        f'NetBIOS name [{new[i]}] conflicts with workgroup name.'
                    )

        if new['guest'] == 'root':
            verrors.add('smb_update.guest',
                        '"root" is not a permitted guest account')

        if new.get('bindip'):
            bindip_choices = list((await self.bindip_choices()).keys())
            for idx, item in enumerate(new['bindip']):
                if item not in bindip_choices:
                    verrors.add(
                        f'smb_update.bindip.{idx}',
                        f'IP address [{item}] is not a configured address for this server'
                    )

        for i in ('filemask', 'dirmask'):
            if not new[i]:
                continue
            try:
                if int(new[i], 8) & ~0o11777:
                    raise ValueError('Not an octet')
            except (ValueError, TypeError):
                verrors.add(f'smb_update.{i}', 'Not a valid mask')

    @accepts(
        Dict(
            'smb_update',
            Str('netbiosname', max_length=15),
            Str('netbiosname_b', max_length=15),
            List('netbiosalias',
                 default=[],
                 items=[Str('netbios_alias', max_length=15)]),
            Str('workgroup'),
            Str('description'),
            Bool('enable_smb1'),
            Str('unixcharset'),
            Str('loglevel',
                enum=['NONE', 'MINIMUM', 'NORMAL', 'FULL', 'DEBUG']),
            Bool('syslog'),
            Bool('aapl_extensions'),
            Bool('localmaster'),
            Str('guest'),
            Str('admin_group', required=False, default=None, null=True),
            Str('filemask'),
            Str('dirmask'),
            Bool('ntlmv1_auth'),
            List('bindip', items=[IPAddr('ip')], default=[]),
            Str('smb_options', max_length=None),
            update=True,
        ))
    async def do_update(self, data):
        """
        Update SMB Service Configuration.

        `netbiosname` defaults to the original hostname of the system.

        `workgroup` and `netbiosname` should have different values.

        `enable_smb1` allows legacy SMB clients to connect to the server when enabled.

        `localmaster` when set, determines if the system participates in a browser election.

        `domain_logons` is used to provide the netlogon service for older Windows clients if enabled.

        `guest` attribute is specified to select the account to be used for guest access. It defaults to "nobody".

        `nullpw` when enabled allows users to authorize access without a password.

        `hostlookup` when enabled, allows using hostnames rather than IP addresses in "hostsallow"/"hostsdeny" fields
        of SMB Shares.
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()
        await self.validate_smb(new, verrors)
        verrors.check()

        if new['admin_group'] and new['admin_group'] != old['admin_group']:
            await self.middleware.call('smb.add_admin_group',
                                       new['admin_group'])

        # TODO: consider using bidict
        for k, v in LOGLEVEL_MAP.items():
            if new['loglevel'] == v:
                new['loglevel'] = k
                break

        await self.compress(new)

        await self._update_service(old, new)
        await self.reset_smb_ha_mode()

        return await self.config()

    @private
    async def compress(self, data):
        data['netbiosalias'] = ' '.join(data['netbiosalias'])
        data.pop('netbiosname_local', None)
        data.pop('next_rid')
        return data
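The do_update method above reverse-maps the API loglevel enum back to its datastore value with a linear scan over LOGLEVEL_MAP (hence the bidict TODO). Below is a standalone sketch of the same idea using a precomputed inverse dict; the mapping values shown are assumptions for illustration, since LOGLEVEL_MAP is defined elsewhere in the module.

# Assumed db-value -> enum mapping for illustration only; the real LOGLEVEL_MAP
# lives elsewhere in the module and may differ.
LOGLEVEL_MAP = {'0': 'NONE', '1': 'MINIMUM', '2': 'NORMAL', '3': 'FULL', '10': 'DEBUG'}

# Building the inverse once replaces the for-loop scan used in do_update.
LOGLEVEL_MAP_INV = {v: k for k, v in LOGLEVEL_MAP.items()}


def loglevel_to_db(loglevel):
    """Translate the API enum value back to its datastore representation."""
    return LOGLEVEL_MAP_INV[loglevel]


assert loglevel_to_db('FULL') == '3'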
Example #9
class SharingSMBService(SharingService):

    share_task_type = 'SMB'

    class Config:
        namespace = 'sharing.smb'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.smb.extend'

    @private
    async def strip_comments(self, data):
        parsed_config = ""
        for entry in data['auxsmbconf'].splitlines():
            if entry == "" or entry.startswith(('#', ';')):
                continue
            parsed_config += entry if len(parsed_config) == 0 else f'\n{entry}'

        data['auxsmbconf'] = parsed_config

    @accepts(
        Dict(
            'sharingsmb_create',
            Str('purpose',
                enum=[x.name for x in SMBSharePreset],
                default=SMBSharePreset.DEFAULT_SHARE.name),
            Str('path', required=True),
            Str('path_suffix', default=''),
            Bool('home', default=False),
            Str('name', max_length=80),
            Str('comment', default=''),
            Bool('ro', default=False),
            Bool('browsable', default=True),
            Bool('timemachine', default=False),
            Bool('recyclebin', default=False),
            Bool('guestok', default=False),
            Bool('abe', default=False),
            List('hostsallow', default=[]),
            List('hostsdeny', default=[]),
            Bool('aapl_name_mangling', default=False),
            Bool('acl', default=True),
            Bool('durablehandle', default=True),
            # shadowcopy only available for FreeBSD (for now)
            Bool('shadowcopy', default=osc.IS_FREEBSD),
            Bool('streams', default=True),
            Bool('fsrvp', default=False),
            Str('auxsmbconf', max_length=None, default=''),
            Bool('enabled', default=True),
            register=True))
    async def do_create(self, data):
        """
        Create a SMB Share.

        `purpose` applies common configuration presets depending on intended purpose.

        `timemachine` when set, enables Time Machine backups for this share.

        `ro` when enabled, prohibits write access to the share.

        `guestok` when enabled, allows access to this share without a password.

        `hostsallow` is a list of hostnames / IP addresses which have access to this share.

        `hostsdeny` is a list of hostnames / IP addresses which are not allowed access to this share. To allow access
        only to a handful of hosts, `hostsdeny` can be set to "ALL", which denies access to all hostnames except the
        ones listed in `hostsallow`.

        `acl` enables support for storing the SMB Security Descriptor as a Filesystem ACL.

        `streams` enables support for storing alternate data streams as filesystem extended attributes.

        `fsrvp` enables support for the filesystem remote VSS protocol. This allows clients to create
        ZFS snapshots through RPC.

        `shadowcopy` enables support for the volume shadow copy service.

        `auxsmbconf` is a string of additional smb4.conf parameters not covered by the system's API.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingsmb_create', verrors)
        await self.validate(data, 'sharingsmb_create', verrors)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.apply_presets(data)
        await self.compress(data)
        vuid = await self.generate_vuid(data['timemachine'])
        data.update({'vuid': vuid})
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.strip_comments(data)
        await self.middleware.call('sharing.smb.reg_addshare', data)
        enable_aapl = await self.check_aapl(data)

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return await self.get_instance(data['id'])

    @accepts(Int('id'),
             Patch('sharingsmb_create', 'sharingsmb_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update SMB Share of `id`.
        """
        verrors = ValidationErrors()
        path = data.get('path')

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        oldname = 'homes' if old['home'] else old['name']
        newname = 'homes' if new['home'] else new['name']

        new['vuid'] = await self.generate_vuid(new['timemachine'], new['vuid'])
        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        if old['purpose'] != new['purpose']:
            await self.apply_presets(new)

        old_is_locked = (await self.get_instance(id))['locked']
        if old['path'] != new['path']:
            new_is_locked = await self.middleware.call(
                'pool.dataset.path_in_locked_datasets', new['path'])
        else:
            new_is_locked = old_is_locked

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.strip_comments(new)
        if not new_is_locked:
            """
            Enabling AAPL SMB2 extensions globally affects SMB shares. If this
            happens, the SMB service _must_ be restarted. Skip this step if the
            dataset underlying the new path is locked.
            """
            enable_aapl = await self.check_aapl(new)
        else:
            enable_aapl = False
        """
        OLD    NEW   = dataset path is encrypted
         ----------
         -      -    = pre-12 behavior. Remove and replace if name changed, else update.
         -      X    = Delete share from running configuration
         X      -    = Add share to running configuration
         X      X    = no-op
        """
        if old_is_locked and new_is_locked:
            """
            Configuration change only impacts a locked SMB share. From the standpoint of
            the running config, this is a no-op. No need to restart or reload service.
            """
            return await self.get_instance(id)

        elif not old_is_locked and not new_is_locked:
            """
            Default behavior before changes for locked datasets.
            """
            if newname != oldname:
                # This is a disruptive change. The share is actually being removed and replaced.
                # Forcibly closes any existing SMB sessions.
                await self.close_share(oldname)
                try:
                    await self.middleware.call('sharing.smb.reg_delshare',
                                               oldname)
                except Exception:
                    self.logger.warning('Failed to remove stale share [%s]',
                                        old['name'],
                                        exc_info=True)
                await self.middleware.call('sharing.smb.reg_addshare', new)
            else:
                diff = await self.middleware.call(
                    'sharing.smb.diff_middleware_and_registry', new['name'],
                    new)
                if diff is None:
                    await self.middleware.call('sharing.smb.reg_addshare', new)
                else:
                    share_name = new['name'] if not new['home'] else 'homes'
                    await self.middleware.call('sharing.smb.apply_conf_diff',
                                               'REGISTRY', share_name, diff)

        elif old_is_locked and not new_is_locked:
            """
            Since the old share was not in our running configuration, we need
            to add it.
            """
            await self.middleware.call('sharing.smb.reg_addshare', new)

        elif not old_is_locked and new_is_locked:
            try:
                await self.middleware.call('sharing.smb.reg_delshare', oldname)
            except Exception:
                self.logger.warning('Failed to remove locked share [%s]',
                                    old['name'],
                                    exc_info=True)

        if enable_aapl:
            await self._service_change('cifs', 'restart')
        else:
            await self._service_change('cifs', 'reload')

        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete SMB Share of `id`. This will forcibly disconnect SMB clients
        that are accessing the share.
        """
        share = await self._get_instance(id)
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self.close_share(share['name'])
        try:
            await self.middleware.call(
                'smb.sharesec._delete',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.debug('Failed to delete share ACL for [%s].',
                              share['name'],
                              exc_info=True)

        try:
            await self.middleware.call(
                'sharing.smb.reg_delshare',
                share['name'] if not share['home'] else 'homes')
        except Exception:
            self.logger.warn('Failed to remove registry entry for [%s].',
                             share['name'],
                             exc_info=True)

        if share['timemachine']:
            await self.middleware.call('service.restart', 'mdns')

        return result

    @private
    async def check_aapl(self, data):
        """
        Returns whether we changed the global aapl support settings.
        """
        aapl_extensions = (
            await self.middleware.call('smb.config'))['aapl_extensions']

        if not aapl_extensions and data['timemachine']:
            await self.middleware.call('datastore.update', 'services_cifs', 1,
                                       {'cifs_srv_aapl_extensions': True})
            return True

        return False

    @private
    async def close_share(self, share_name):
        c = await run(
            [SMBCmd.SMBCONTROL.value, 'smbd', 'close-share', share_name],
            check=False)
        if c.returncode != 0:
            self.logger.warn('Failed to close smb share [%s]: [%s]',
                             share_name,
                             c.stderr.decode().strip())

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate_aux_params(self, data, schema_name):
        """
        libsmbconf expects to be provided with key-value pairs.
        """
        verrors = ValidationErrors()
        for entry in data.splitlines():
            if entry == '' or entry.startswith(('#', ';')):
                continue

            kv = entry.split('=', 1)
            if len(kv) != 2:
                verrors.add(
                    f'{schema_name}.auxsmbconf',
                    f'Auxiliary parameters must be in the format of "key = value": {entry}'
                )
                continue

        verrors.check()

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        home_result = await self.home_exists(data['home'], schema_name,
                                             verrors, old)

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')
        elif not home_result and not data['path']:
            verrors.add(f'{schema_name}.path', 'This field is required.')

        if data['path']:
            await self.validate_path_field(data, schema_name, verrors)

        if data['auxsmbconf']:
            try:
                await self.validate_aux_params(data['auxsmbconf'],
                                               f'{schema_name}.auxsmbconf')
            except ValidationErrors as errs:
                verrors.add_child(f'{schema_name}.auxsmbconf', errs)

        if not data['acl'] and not await self.middleware.call(
                'filesystem.acl_is_trivial', data['path']):
            verrors.add(
                f'{schema_name}.acl',
                f'ACL detected on {data["path"]}. ACLs must be stripped prior to creation '
                'of SMB share.')

        if data.get('name') and data['name'].lower() in [
                'global', 'homes', 'printers'
        ]:
            verrors.add(
                f'{schema_name}.name',
                f'{data["name"]} is a reserved section name, please select another one'
            )

        if osc.IS_LINUX:
            if data['shadowcopy']:
                verrors.add(
                    f'{schema_name}.shadowcopy',
                    'ZFS shadow copy support is not yet implemented in TrueNAS SCALE'
                )
            if data['fsrvp']:
                verrors.add(
                    f'{schema_name}.fsrvp',
                    'ZFS fsrvp support is not yet implemented in TrueNAS SCALE'
                )

        if data.get('path_suffix') and len(data['path_suffix'].split('/')) > 2:
            verrors.add(
                f'{schema_name}.name',
                'Path suffix may not contain more than two components.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        return home_result

    @private
    async def auxsmbconf_dict(self, aux, direction="TO"):
        ret = None
        if direction == 'TO':
            ret = {}
            for entry in aux.splitlines():
                if entry == '':
                    continue

                if entry.startswith(('#', ';')):
                    # Special handling for comments
                    ret[entry] = None
                    continue

                kv = entry.split('=', 1)
                ret[kv[0].strip()] = kv[1].strip()

            return ret

        if direction == 'FROM':
            return '\n'.join(
                [f'{k}={v}' if v is not None else k for k, v in aux.items()])

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']

        if path and not name:
            name = path.rsplit('/', 1)[-1]

        name_filters = [('name', '=', name)]

        if id is not None:
            name_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        return name

    @private
    async def extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        if data['fsrvp']:
            data['shadowcopy'] = True

        if 'share_acl' in data:
            data.pop('share_acl')

        return data

    @private
    async def compress(self, data):
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data.pop(self.locked_field, None)

        return data

    @private
    async def generate_vuid(self, timemachine, vuid=""):
        try:
            if timemachine and vuid:
                uuid.UUID(vuid, version=4)
        except ValueError:
            self.logger.debug(
                f"Time machine VUID string ({vuid}) is invalid. Regenerating.")
            vuid = ""

        if timemachine and not vuid:
            vuid = str(uuid.uuid4())

        return vuid

    @private
    async def apply_presets(self, data):
        """
        Apply settings from presets. Auxiliary parameters from the preset are
        only merged in if user-defined aux parameters already exist; in that
        case the user-defined values take precedence.
        """
        params = (SMBSharePreset[data["purpose"]].value)["params"].copy()
        aux = params.pop("auxsmbconf")
        data.update(params)
        if data["auxsmbconf"]:
            preset_aux = await self.auxsmbconf_dict(aux, direction="TO")
            data_aux = await self.auxsmbconf_dict(data["auxsmbconf"],
                                                  direction="TO")
            preset_aux.update(data_aux)
            data["auxsmbconf"] = await self.auxsmbconf_dict(preset_aux,
                                                            direction="FROM")

        return data

    @accepts()
    async def presets(self):
        """
        Retrieve pre-defined configuration sets for specific use-cases. These parameter
        combinations are often non-obvious, but beneficial in these scenarios.
        """
        return {x.name: x.value for x in SMBSharePreset}

    @private
    async def sync_registry(self):
        """
        Synchronize registry config with the share configuration in the TrueNAS config
        file. This method simply reconciles the lists of shares, removing from and adding
        to the registry as needed.
        """
        if not os.path.exists(SMBPath.GLOBALCONF.platform()):
            self.logger.warning(
                "smb.conf does not exist. Skipping registry synchronization."
                "This may indicate that SMB service has not completed initialization."
            )
            return

        active_shares = await self.query([('locked', '=', False),
                                          ('enabled', '=', True)])
        registry_shares = await self.middleware.call(
            'sharing.smb.reg_listshares')
        cf_active = set([x['name'].casefold() for x in active_shares])
        cf_reg = set([x.casefold() for x in registry_shares])
        to_add = cf_active - cf_reg
        to_del = cf_reg - cf_active

        for share in to_add:
            share_conf = list(
                filter(lambda x: x['name'].casefold() == share.casefold(),
                       active_shares))
            if not os.path.exists(share_conf[0]['path']):
                self.logger.warning(
                    "Path [%s] for share [%s] does not exist. "
                    "Refusing to add share to SMB configuration.",
                    share_conf[0]['path'], share_conf[0]['name'])
                continue

            try:
                await self.middleware.call('sharing.smb.reg_addshare',
                                           share_conf[0])
            except Exception:
                self.logger.warning(
                    "Failed to add SMB share [%] while synchronizing registry config",
                    share,
                    exc_info=True)

        for share in to_del:
            await self.middleware.call('sharing.smb.close_share', share)
            try:
                await self.middleware.call('sharing.smb.reg_delshare', share)
            except Exception:
                self.middleware.logger.warning(
                    'Failed to remove stale share [%s]', share, exc_info=True)
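Finally, a standalone sketch of the auxsmbconf round-trip that apply_presets above relies on: the "TO" direction parses aux parameter text into a dict (comment lines kept as keys mapped to None), the "FROM" direction serializes it back, and a plain dict update lets user-defined parameters win over preset ones. Function names and sample parameters are illustrative only.

def aux_to_dict(aux):
    """Parse "key = value" lines; keep comment lines as keys mapped to None."""
    out = {}
    for entry in aux.splitlines():
        if not entry:
            continue
        if entry.startswith(('#', ';')):
            out[entry] = None
            continue
        key, value = entry.split('=', 1)
        out[key.strip()] = value.strip()
    return out


def dict_to_aux(parsed):
    """Serialize the dict back into aux parameter text."""
    return '\n'.join(f'{k}={v}' if v is not None else k
                     for k, v in parsed.items())


preset = aux_to_dict('veto files = /.DS_Store/\n# preset comment')
user = aux_to_dict('veto files = /Thumbs.db/')
preset.update(user)  # user-defined parameters take precedence, as in apply_presets
print(dict_to_aux(preset))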
Example #10
class RsyncTaskService(TaskPathService):

    share_task_type = 'Rsync'

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'
        datastore_extend_context = 'rsynctask.rsync_task_extend_context'
        cli_namespace = 'task.rsync'

    ENTRY = Patch(
        'rsync_task_create',
        'rsync_task_entry',
        ('rm', {
            'name': 'validate_rpath'
        }),
        ('add', Int('id')),
        ('add', Bool('locked')),
        ('add', Dict('job', null=True, additional_attrs=True)),
    )

    @private
    async def rsync_task_extend(self, data, context):
        try:
            data['extra'] = shlex.split(data['extra'].replace('"',
                                                              r'"\"').replace(
                                                                  "'", r'"\"'))
        except ValueError:
            # Handle cases where the extra value was misconfigured by older versions.
            # Going forward, new values are verified to split successfully with shlex.
            data['extra'] = data['extra'].split()

        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        data['job'] = context['jobs'].get(data['id'])
        return data

    @private
    async def rsync_task_extend_context(self, rows, extra):
        jobs = {}
        for j in await self.middleware.call("core.get_jobs",
                                            [("method", "=", "rsynctask.run")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        return {
            "jobs": jobs,
        }

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user',
                                              username)

        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        await self.validate_path_field(data, schema, verrors)

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        data['extra'] = ' '.join(data['extra'])
        try:
            shlex.split(data['extra'].replace('"',
                                              r'"\"').replace("'", r'"\"'))
        except ValueError as e:
            verrors.add(f'{schema}.extra', f'Please specify valid value: {e}')

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in the home directory.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data['enabled'] and data['validate_rpath'] and remote_path
                    and remote_host and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    async with await asyncio.wait_for(
                            asyncssh.connect(remote_host,
                                             port=remote_port,
                                             username=remote_username,
                                             client_keys=key_files,
                                             known_hosts=None),
                            timeout=5,
                    ) as conn:
                        await conn.run(f'test -d {shlex.quote(remote_path)}',
                                       check=True)
                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', e.__str__())

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field')
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}')

                except asyncssh.Error as e:

                    if e.__class__.__name__ in str(e):
                        exception_reason = str(e)
                    else:
                        exception_reason = f'{e.__class__.__name__} {e}'
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data['enabled'] and data['validate_rpath']:
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath', default=True),
            Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
            Str('desc'),
            Cron(
                'schedule',
                defaults={'minute': '00'},
            ),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled', default=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        See the comment in Rsyncmod about `path` length limits.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents the operating mechanism for Rsync, i.e. Rsync Module mode or Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        old.pop('job')

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)
        new.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self.get_instance(id)
        path = shlex.quote(rsync['path'])

        line = ['rsync']
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-zz'),
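            # -zz selects rsync's new-style compression (see the STREAMIO handling in run() below).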
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [
                path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"'
            ]
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)

    @item_method
    @accepts(Int('id'))
    @returns()
    @job(lock=lambda args: args[-1], lock_queue_size=1, logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt (not syslog).
        """
        self.middleware.call_sync('network.general.will_perform_activity',
                                  'rsync')

        rsync = self.middleware.call_sync('rsynctask.get_instance', id)
        if rsync['locked']:
            self.middleware.call_sync('rsynctask.generate_locked_alert', id)
            return

        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(commandline, rsync['user'],
                                           lambda v: job.logs_fd.write(v))

        for klass in ('RsyncSuccess',
                      'RsyncFailed') if not rsync['quiet'] else ():
            self.middleware.call_sync('alert.oneshot_delete', klass,
                                      rsync['id'])

        if cp.returncode not in RsyncReturnCode.nonfatals():
            err = None
            if cp.returncode == RsyncReturnCode.STREAMIO and rsync['compress']:
                err = (
                    "rsync command with compression enabled failed with STREAMIO error. "
                    "This may indicate that remote server lacks support for the new-style "
                    "compression used by TrueNAS.")

            if not rsync['quiet']:
                self.middleware.call_sync(
                    'alert.oneshot_create', 'RsyncFailed', {
                        'id': rsync['id'],
                        'direction': rsync['direction'],
                        'path': rsync['path'],
                    })

            if err:
                msg = f'{err} Check logs for further information'
            else:
                try:
                    rc_name = RsyncReturnCode(cp.returncode).name
                except ValueError:
                    rc_name = 'UNKNOWN'

                msg = (f'rsync command returned {cp.returncode} - {rc_name}. '
                       'Check logs for further information.')
            raise CallError(msg)

        elif not rsync['quiet']:
            self.middleware.call_sync(
                'alert.oneshot_create', 'RsyncSuccess', {
                    'id': rsync['id'],
                    'direction': rsync['direction'],
                    'path': rsync['path'],
                })
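
For reference, the flag translation performed by `commandline` above can be exercised in isolation. The sketch below is a minimal, standalone approximation (the sample task dict and the `build_flags` helper are hypothetical, not part of the middleware):

import shlex

# Hypothetical, trimmed-down version of the boolean-to-switch mapping in commandline().
FLAG_MAP = (
    ('archive', '-a'),
    ('compress', '-zz'),
    ('delayupdates', '--delay-updates'),
    ('delete', '--delete-delay'),
    ('preserveattr', '-X'),
    ('preserveperm', '-p'),
    ('recursive', '-r'),
    ('times', '-t'),
)


def build_flags(task):
    # Keep only the switches whose corresponding boolean field is truthy.
    return [flag for name, flag in FLAG_MAP if task.get(name)]


task = {'archive': True, 'delete': True, 'compress': False, 'path': '/mnt/tank/data'}
print(['rsync'] + build_flags(task) + [shlex.quote(task['path'])])
# ['rsync', '-a', '--delete-delay', '/mnt/tank/data']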
Example #11
File: cloud_sync.py Project: tejp/freenas
class CloudSyncService(CRUDService):

    local_fs_lock_manager = FsLockManager()
    remote_fs_lock_manager = FsLockManager()

    class Config:
        datastore = "tasks.cloudsync"
        datastore_extend = "cloudsync._extend"

    @filterable
    async def query(self, filters=None, options=None):
        """
        Query all Cloud Sync Tasks with `query-filters` and `query-options`.
        """
        tasks_or_task = await super().query(filters, options)

        jobs = {}
        for j in await self.middleware.call("core.get_jobs", [("method", "=", "cloudsync.sync")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        if isinstance(tasks_or_task, list):
            for task in tasks_or_task:
                task["job"] = jobs.get(task["id"])
        else:
            tasks_or_task["job"] = jobs.get(tasks_or_task["id"])

        return tasks_or_task

    @private
    async def _extend(self, cloud_sync):
        cloud_sync["credentials"] = cloud_sync.pop("credential")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_password"]
        )
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.decrypt", cloud_sync["encryption_salt"]
        )

        Cron.convert_db_format_to_schedule(cloud_sync)

        return cloud_sync

    @private
    async def _compress(self, cloud_sync):
        cloud_sync["credential"] = cloud_sync.pop("credentials")

        cloud_sync["encryption_password"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_password"]
        )
        cloud_sync["encryption_salt"] = await self.middleware.call(
            "pwenc.encrypt", cloud_sync["encryption_salt"]
        )

        Cron.convert_schedule_to_db_format(cloud_sync)

        cloud_sync.pop('job', None)

        return cloud_sync

    @private
    async def _get_credentials(self, credentials_id):
        try:
            return await self.middleware.call("datastore.query", "system.cloudcredentials",
                                              [("id", "=", credentials_id)], {"get": True})
        except IndexError:
            return None

    @private
    async def _basic_validate(self, verrors, name, data):
        if data["encryption"]:
            if not data["encryption_password"]:
                verrors.add(f"{name}.encryption_password", "This field is required when encryption is enabled")

        credentials = await self._get_credentials(data["credentials"])
        if not credentials:
            verrors.add(f"{name}.credentials", "Invalid credentials")

        try:
            shlex.split(data["args"])
        except ValueError as e:
            verrors.add(f"{name}.args", f"Parse error: {e.args[0]}")

        if verrors:
            raise verrors

        provider = REMOTES[credentials["provider"]]

        schema = []

        if provider.buckets:
            schema.append(Str("bucket", required=True, empty=False))

        schema.append(Str("folder", required=True))

        schema.extend(provider.task_schema)

        schema.extend(self.common_task_schema(provider))

        attributes_verrors = validate_attributes(schema, data, additional_attrs=True)

        if not attributes_verrors:
            await provider.pre_save_task(data, credentials, verrors)

        verrors.add_child(f"{name}.attributes", attributes_verrors)

    @private
    async def _validate(self, verrors, name, data):
        await self._basic_validate(verrors, name, data)

        for i, (limit1, limit2) in enumerate(zip(data["bwlimit"], data["bwlimit"][1:])):
            if limit1["time"] >= limit2["time"]:
                verrors.add(f"{name}.bwlimit.{i + 1}.time", f"Invalid time order: {limit1['time']}, {limit2['time']}")

        if data["snapshot"]:
            if data["direction"] != "PUSH":
                verrors.add(f"{name}.snapshot", "This option can only be enabled for PUSH tasks")

    @private
    async def _validate_folder(self, verrors, name, data):
        if data["direction"] == "PULL":
            folder = data["attributes"]["folder"].rstrip("/")
            if folder:
                folder_parent = os.path.normpath(os.path.join(folder, ".."))
                if folder_parent == ".":
                    folder_parent = ""
                folder_basename = os.path.basename(folder)
                ls = await self.list_directory(dict(
                    credentials=data["credentials"],
                    encryption=data["encryption"],
                    filename_encryption=data["filename_encryption"],
                    encryption_password=data["encryption_password"],
                    encryption_salt=data["encryption_salt"],
                    attributes=dict(data["attributes"], folder=folder_parent),
                    args=data["args"],
                ))
                for item in ls:
                    if item["Name"] == folder_basename:
                        if not item["IsDir"]:
                            verrors.add(f"{name}.attributes.folder", "This is not a directory")
                        break
                else:
                    verrors.add(f"{name}.attributes.folder", "Directory does not exist")

        if data["direction"] == "PUSH":
            credentials = await self._get_credentials(data["credentials"])

            provider = REMOTES[credentials["provider"]]

            if provider.readonly:
                verrors.add(f"{name}.direction", "This remote is read-only")

    @accepts(Dict(
        "cloud_sync_create",
        Str("description", default=""),
        Str("direction", enum=["PUSH", "PULL"], required=True),
        Str("transfer_mode", enum=["SYNC", "COPY", "MOVE"], required=True),
        Str("path", required=True),
        Int("credentials", required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Str("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Cron(
            "schedule",
            defaults={"minute": "00"},
            required=True
        ),
        Bool("follow_symlinks", default=False),
        Int("transfers", null=True, default=None, validators=[Range(min=1)]),
        List("bwlimit", default=[], items=[Dict("cloud_sync_bwlimit",
                                                Str("time", validators=[Time()]),
                                                Int("bandwidth", validators=[Range(min=1)], null=True))]),
        List("exclude", default=[], items=[Str("path", empty=False)]),
        Dict("attributes", additional_attrs=True, required=True),
        Bool("snapshot", default=False),
        Str("pre_script", default=""),
        Str("post_script", default=""),
        Str("args", default=""),
        Bool("enabled", default=True),
        register=True,
    ))
    async def do_create(self, cloud_sync):
        """
        Creates a new cloud_sync entry.

        .. examples(websocket)::

          Create a new cloud_sync task using Amazon S3 attributes, scheduled to run every hour.

            :::javascript
            {
              "id": "6841f242-840a-11e6-a437-00e04d680384",
              "msg": "method",
              "method": "cloudsync.create",
              "params": [{
                "description": "s3 sync",
                "path": "/mnt/tank",
                "credentials": 1,
                "minute": "00",
                "hour": "*",
                "daymonth": "*",
                "month": "*",
                "attributes": {
                  "bucket": "mybucket",
                  "folder": ""
                },
                "enabled": true
              }]
            }
        """

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        cloud_sync["id"] = await self.middleware.call("datastore.insert", "tasks.cloudsync", cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"), Patch("cloud_sync_create", "cloud_sync_update", ("attr", {"update": True})))
    async def do_update(self, id, data):
        """
        Updates the cloud_sync entry `id` with `data`.
        """
        cloud_sync = await self._get_instance(id)

        # credentials is a foreign key for now
        if cloud_sync["credentials"]:
            cloud_sync["credentials"] = cloud_sync["credentials"]["id"]

        cloud_sync.update(data)

        verrors = ValidationErrors()

        await self._validate(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        await self._validate_folder(verrors, "cloud_sync_update", cloud_sync)

        if verrors:
            raise verrors

        cloud_sync = await self._compress(cloud_sync)

        await self.middleware.call("datastore.update", "tasks.cloudsync", id, cloud_sync)
        await self.middleware.call("service.restart", "cron")

        cloud_sync = await self._extend(cloud_sync)
        return cloud_sync

    @accepts(Int("id"))
    async def do_delete(self, id):
        """
        Deletes cloud_sync entry `id`.
        """
        await self.middleware.call("datastore.delete", "tasks.cloudsync", id)
        await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", id)
        await self.middleware.call("service.restart", "cron")

    @accepts(Int("credentials_id"))
    async def list_buckets(self, credentials_id):
        credentials = await self._get_credentials(credentials_id)
        if not credentials:
            raise CallError("Invalid credentials")

        provider = REMOTES[credentials["provider"]]

        if not provider.buckets:
            raise CallError("This provider does not use buckets")

        return await self.ls({"credentials": credentials}, "")

    @accepts(Dict(
        "cloud_sync_ls",
        Int("credentials", required=True),
        Bool("encryption", default=False),
        Bool("filename_encryption", default=False),
        Str("encryption_password", default=""),
        Str("encryption_salt", default=""),
        Dict("attributes", required=True, additional_attrs=True),
        Str("args", default=""),
    ))
    async def list_directory(self, cloud_sync):
        """
        List contents of a remote bucket / directory.

        If the remote supports buckets, the path is constructed from the two keys "bucket" and "folder" in `attributes`.
        If the remote does not support buckets, the path is constructed from the "folder" key only in `attributes`.
        "folder" is the directory name and "bucket" is the bucket name on the remote.

        Path examples:

        S3 Service
        `bucketname/directory/name`

        Dropbox Service
        `directory/name`


        `credentials` is a valid id of a Cloud Sync Credential which will be used to connect to the provider.
        """
        verrors = ValidationErrors()

        await self._basic_validate(verrors, "cloud_sync", dict(cloud_sync))

        if verrors:
            raise verrors

        credentials = await self._get_credentials(cloud_sync["credentials"])

        path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])

        return await self.ls(dict(cloud_sync, credentials=credentials), path)

    @private
    async def ls(self, config, path):
        async with RcloneConfig(config) as config:
            proc = await run(["rclone", "--config", config.config_path, "lsjson", "remote:" + path],
                             check=False, encoding="utf8")
            if proc.returncode == 0:
                return json.loads(proc.stdout)
            else:
                raise CallError(proc.stderr)

    @item_method
    @accepts(Int("id"))
    @job(lock=lambda args: "cloud_sync:{}".format(args[-1]), lock_queue_size=1, logs=True)
    async def sync(self, job, id):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self._get_instance(id)

        credentials = cloud_sync["credentials"]

        local_path = cloud_sync["path"]
        local_direction = FsLockDirection.READ if cloud_sync["direction"] == "PUSH" else FsLockDirection.WRITE

        remote_path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])
        remote_direction = FsLockDirection.READ if cloud_sync["direction"] == "PULL" else FsLockDirection.WRITE

        directions = {
            FsLockDirection.READ: "reading",
            FsLockDirection.WRITE: "writing",
        }

        job.set_progress(0, f"Locking local path {local_path!r} for {directions[local_direction]}")
        async with self.local_fs_lock_manager.lock(local_path, local_direction):
            job.set_progress(0, f"Locking remote path {remote_path!r} for {directions[remote_direction]}")
            async with self.remote_fs_lock_manager.lock(f"{credentials['id']}/{remote_path}", remote_direction):
                job.set_progress(0, "Starting")
                try:
                    await rclone(self.middleware, job, cloud_sync)
                    await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", cloud_sync["id"])
                except Exception:
                    await self.middleware.call("alert.oneshot_create", "CloudSyncTaskFailed", {
                        "id": cloud_sync["id"],
                        "name": cloud_sync["description"],
                    })
                    raise

    @accepts()
    async def providers(self):
        """
        Returns a list of dictionaries of supported providers for Cloud Sync Tasks.

        `credentials_schema` is JSON schema for credentials attributes.

        `task_schema` is JSON schema for task attributes.

        `buckets` is a boolean value which is set to "true" if the provider supports buckets.

        Example of a single provider:

        [
            {
                "name": "AMAZON_CLOUD_DRIVE",
                "title": "Amazon Cloud Drive",
                "credentials_schema": [
                    {
                        "property": "client_id",
                        "schema": {
                            "title": "Amazon Application Client ID",
                            "_required_": true,
                            "type": "string"
                        }
                    },
                    {
                        "property": "client_secret",
                        "schema": {
                            "title": "Application Key",
                            "_required_": true,
                            "type": "string"
                        }
                    }
                ],
                "credentials_oauth": null,
                "buckets": false,
                "bucket_title": "Bucket",
                "task_schema": []
            }
        ]
        """
        return sorted(
            [
                {
                    "name": provider.name,
                    "title": provider.title,
                    "credentials_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema()
                        }
                        for field in provider.credentials_schema
                    ],
                    "credentials_oauth": f"{OAUTH_URL}/{provider.name.lower()}" if provider.credentials_oauth else None,
                    "buckets": provider.buckets,
                    "bucket_title": provider.bucket_title,
                    "task_schema": [
                        {
                            "property": field.name,
                            "schema": field.to_json_schema()
                        }
                        for field in provider.task_schema + self.common_task_schema(provider)
                    ],
                }
                for provider in REMOTES.values()
            ],
            key=lambda provider: provider["title"].lower()
        )

    def common_task_schema(self, provider):
        schema = []

        if provider.fast_list:
            schema.append(Bool("fast_list", default=False, title="Use --fast-list", description=textwrap.dedent("""\
                Use fewer transactions in exchange for more RAM. This may also speed up or slow down your
                transfer. See [rclone documentation](https://rclone.org/docs/#fast-list) for more details.
            """).rstrip()))

        return schema
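
The parent/basename decomposition used by `_validate_folder` above can also be shown on its own. A minimal sketch, assuming a plain, non-empty string argument (the `split_remote_folder` helper is hypothetical):

import os


def split_remote_folder(folder):
    # Mirrors _validate_folder: strip a trailing slash, then compute the parent
    # (empty string for a top-level folder) and the basename that is looked up
    # in the parent's directory listing.
    folder = folder.rstrip('/')
    parent = os.path.normpath(os.path.join(folder, '..'))
    if parent == '.':
        parent = ''
    return parent, os.path.basename(folder)


print(split_remote_folder('backups/daily/'))  # ('backups', 'daily')
print(split_remote_folder('daily'))           # ('', 'daily')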
Example #12
class RsyncModService(SharingService):

    share_task_type = 'Rsync Module'

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'
        cli_namespace = 'service.rsync_mod'

    ENTRY = Patch(
        'rsyncmod_create',
        'rsyncmod_entry',
        ('add', Bool('locked')),
        ('add', Int('id')),
    )

    @private
    async def rsync_mod_extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        await self.validate_path_field(data, schema_name, verrors)

        for entity in ('user', 'group'):
            value = data.get(entity)
            try:
                await self.middleware.call(f'{entity}.get_{entity}_obj',
                                           {f'{entity}name': value})
            except Exception:
                verrors.add(f'{schema_name}.{entity}',
                            f'Please specify a valid {entity}')

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(
        Dict(
            'rsyncmod_create',
            Bool('enabled', default=True),
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('mode', enum=['RO', 'RW', 'WO'], required=True),
            Int('maxconn'),
            Str('user', default='nobody'),
            Str('group', default='nobody'),
            List('hostsallow', items=[Str('hostsallow')]),
            List('hostsdeny', items=[Str('hostdeny')]),
            Str('auxiliary', max_length=None),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to a dataset. Path length is limited to a maximum of 1023 characters, as per the
        limit enforced by FreeBSD. It is possible to reach this maximum length recursively while transferring data. In
        that case, the user must ensure the resulting paths do not exceed the limit, or shorten the recursed path
        accordingly.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match the hostname/IP address of a connecting client. If the list is
        empty, all hosts are allowed.

        `hostsdeny` is a list of patterns to match the hostname/IP address of a connecting client. If a pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
        """

        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self.get_instance(data['id'])

    @accepts(Int('id'),
             Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        module = await self.get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')
        module.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, module,
                                   {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)
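
The rsyncmod service stores `hostsallow`/`hostsdeny` as space-separated strings and `mode` in lower case, converting in `rsync_mod_extend` and `common_validation` above. A minimal sketch of that round trip, assuming a plain dict in place of the real datastore row (the `extend`/`compress` names here are illustrative only):

def extend(row):
    # datastore -> API representation: lists and an upper-case mode
    return {**row,
            'hostsallow': row['hostsallow'].split(),
            'hostsdeny': row['hostsdeny'].split(),
            'mode': row['mode'].upper()}


def compress(data):
    # API -> datastore representation: space-separated strings and a lower-case mode
    return {**data,
            'hostsallow': ' '.join(data['hostsallow']),
            'hostsdeny': ' '.join(data['hostsdeny']),
            'mode': data['mode'].lower()}


row = {'hostsallow': '192.168.0.0/24 10.0.0.5', 'hostsdeny': '', 'mode': 'ro'}
assert compress(extend(row)) == row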
Example #13
class ACLBase(ServicePartBase):

    @accepts(
        Dict(
            'filesystem_acl',
            Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            List(
                'dacl',
                items=[
                    Dict(
                        'aclentry',
                        Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
                        Int('id', null=True),
                        Str('type', enum=['ALLOW', 'DENY']),
                        Dict(
                            'perms',
                            Bool('READ_DATA'),
                            Bool('WRITE_DATA'),
                            Bool('APPEND_DATA'),
                            Bool('READ_NAMED_ATTRS'),
                            Bool('WRITE_NAMED_ATTRS'),
                            Bool('EXECUTE'),
                            Bool('DELETE_CHILD'),
                            Bool('READ_ATTRIBUTES'),
                            Bool('WRITE_ATTRIBUTES'),
                            Bool('DELETE'),
                            Bool('READ_ACL'),
                            Bool('WRITE_ACL'),
                            Bool('WRITE_OWNER'),
                            Bool('SYNCHRONIZE'),
                            Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'TRAVERSE']),
                        ),
                        Dict(
                            'flags',
                            Bool('FILE_INHERIT'),
                            Bool('DIRECTORY_INHERIT'),
                            Bool('NO_PROPAGATE_INHERIT'),
                            Bool('INHERIT_ONLY'),
                            Bool('INHERITED'),
                            Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                        ),
                    ),
                    Dict(
                        'posix1e_ace',
                        Bool('default', default=False),
                        Str('tag', enum=['USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP', 'OTHER', 'MASK']),
                        Int('id', default=-1),
                        Dict(
                            'perms',
                            Bool('READ', default=False),
                            Bool('WRITE', default=False),
                            Bool('EXECUTE', default=False),
                        ),
                    )
                ],
                default=[]
            ),
            Dict(
                'nfs41_flags',
                Bool('autoinherit', default=False),
                Bool('protected', default=False),
            ),
            Str('acltype', enum=[x.name for x in ACLType], default=ACLType.NFS4.name),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
                Bool('canonicalize', default=True)
            )
        )
    )
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` ACL entries. Formatting depends on the underlying `acltype`. NFS4ACL requires
        NFSv4 entries. POSIX1e requires POSIX1e entries.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert the ACL to a trivial ACL. An ACL is trivial if it can be expressed as a file mode without
        losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL). This only applies to NFSv4 ACLs.

        In the case of NFSv4 ACLs, we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path. This may return a POSIX1e ACL or a NFSv4 ACL. The acl type is indicated
        by the `ACLType` key.

        Errata about ACLType NFSv4:

        `simplified` returns a shortened form of the ACL permset and flags.

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the predefined simplified permission types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """

    @accepts(
        Dict(
            'filesystem_ownership',
            Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('recursive', default=False),
                Bool('traverse', default=False)
            )
        )
    )
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """

    @accepts(
        Dict(
            'filesystem_permission',
            Str('path', required=True),
            UnixPerm('mode', null=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )
        )
    )
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """

    @accepts()
    async def default_acl_choices(self):
        """
        Get list of default ACL types.
        """

    @accepts(
        Str('acl_type', default='OPEN', enum=ACLDefault.options()),
        Str('share_type', default='NONE', enum=['NONE', 'AFP', 'SMB', 'NFS']),
    )
    async def get_default_acl(self, acl_type, share_type):
        """
Example #14
                'options.type',
                f'Requested ICMPv6 protocol, but the address provided "{addr}" is not a valid IPv6 address.'
            )
        verrors.check()

        ping_host = False
        if addr.version == 4:
            ping_host = self._ping_host(ip, options['timeout'])
        elif addr.version == 6:
            ping_host = self._ping6_host(ip, options['timeout'])

        return ping_host

    @accepts(
        Str('method'),
        List('args'),
        Str('filename'),
    )
    async def download(self, method, args, filename):
        """
        Core helper to call a job marked for download.

        Returns the job id and the URL for download.
        """
        job = await self.middleware.call(method, *args, pipes=Pipes(output=self.middleware.pipe()))
        token = await self.middleware.call('auth.generate_token', 300, {'filename': filename, 'job': job.id})
        self.middleware.fileapp.register_job(job.id)
        return job.id, f'/_download/{job.id}?auth_token={token}'

    def __kill_multiprocessing(self):
        # We need to kill this because multiprocessing has passed it stderr fd which is /var/log/middlewared.log
Example #15
class KubernetesNodeService(ConfigService):

    class Config:
        namespace = 'k8s.node'
        private = True

    async def config(self):
        try:
            async with api_client({'node': True}) as (api, context):
                return {
                    'node_configured': True,
                    'events': await self.middleware.call('k8s.event.query', [], {
                        'extra': {'field_selector': f'involvedObject.uid={NODE_NAME}'}
                    }),
                    **(context['node'].to_dict())
                }
        except Exception as e:
            return {'node_configured': False, 'error': str(e)}

    @accepts(
        List(
            'add_taints',
            items=[Dict(
                'taint',
                Str('key', required=True, empty=False),
                Str('value', null=True, default=None),
                Str('effect', required=True, empty=False, enum=['NoSchedule', 'NoExecute'])
            )],
        )
    )
    async def add_taints(self, taints):
        async with api_client({'node': True}) as (api, context):
            for taint in taints:
                await nodes.add_taint(context['core_api'], taint, context['node'])

        remaining_taints = {t['key'] for t in taints}
        timeout = 600
        while remaining_taints and timeout > 0:
            await asyncio.sleep(3)
            timeout -= 3

            config = await self.config()
            if not config['node_configured']:
                break

            remaining_taints -= {t['key'] for t in (config['spec']['taints'] or [])}

    @accepts(
        List('remove_taints', items=[Str('taint_key')]),
    )
    async def remove_taints(self, taint_keys):
        async with api_client({'node': True}) as (api, context):
            for taint_key in taint_keys:
                await nodes.remove_taint(context['core_api'], taint_key, context['node'])

    @accepts()
    async def delete_node(self):
        async with api_client({'node': True}) as (api, context):
            await context['core_api'].delete_node(NODE_NAME)

    @accepts()
    async def worker_node_password(self):
        return KUBERNETES_WORKER_NODE_PASSWORD
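
The wait loop in `add_taints` above is a plain poll-with-deadline pattern. A generic standalone sketch of the same idea follows; the `check` coroutine, interval, and timeout values are placeholders rather than middleware API:

import asyncio


async def wait_until(check, timeout=600, interval=3):
    # Poll `check()` every `interval` seconds until it reports success
    # or the timeout budget runs out.
    while timeout > 0:
        if await check():
            return True
        await asyncio.sleep(interval)
        timeout -= interval
    return False


async def main():
    pending = {'example.org/maintenance'}

    async def taints_applied():
        pending.clear()  # stand-in for re-reading the node's spec.taints
        return not pending

    print(await wait_until(taints_applied, timeout=9, interval=3))


asyncio.run(main())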
Example #16
class StatsService(Service):
    @accepts()
    def get_sources(self):
        """
        Returns an object mapping each available source to its metric datasets.
        """
        sources = {}
        if not os.path.exists(RRD_PATH):
            return {}
        for i in glob.glob('{}/*/*.rrd'.format(RRD_PATH)):
            source, metric = i.replace(RRD_PATH, '').split('/', 1)
            if metric.endswith('.rrd'):
                metric = metric[:-4]
            if source not in sources:
                sources[source] = []
            sources[source].append(metric)
        return sources

    @accepts(Str('source'), Str('type'))
    async def get_dataset_info(self, source, _type):
        """
        Returns info about a given dataset from some source.
        """
        rrdfile = '{}/{}/{}.rrd'.format(RRD_PATH, source, _type)
        proc = await Popen(
            ['/usr/local/bin/rrdtool', 'info', rrdfile],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        data, err = await proc.communicate()
        if proc.returncode != 0:
            raise ValueError('rrdtool failed: {}'.format(err.decode()))
        data = data.decode()

        info = {'source': source, 'type': _type, 'datasets': {}}
        for dataset, _type in RE_DSTYPE.findall(data):
            info['datasets'][dataset] = {'type': _type}

        reg = RE_STEP.search(data)
        if reg:
            info['step'] = int(reg.group(1))
        reg = RE_LAST_UPDATE.search(data)
        if reg:
            info['last_update'] = int(reg.group(1))
        return info

    @accepts(
        List('stats_list',
             items=[
                 Dict(
                     'stats-data',
                     Str('source'),
                     Str('type'),
                     Str('dataset'),
                     Str('cf', default='AVERAGE'),
                     additional_attrs=False,
                 )
             ]),
        Dict(
            'stats-filter',
            Int('step', default=10),
            Str('start', default='now-1h'),
            Str('end', default='now'),
        ),
    )
    async def get_data(self, data_list, stats):
        """
        Get data points from rrd files.
        """
        if not data_list:
            raise ValidationError('stats_list',
                                  'This parameter cannot be empty')

        defs = []
        names_pair = []
        for i, data in enumerate(data_list):
            names_pair.append([data['source'], data['type']])
            rrdfile = '{}/{}/{}.rrd'.format(RRD_PATH, data['source'],
                                            data['type'])
            defs.extend([
                'DEF:xxx{}={}:{}:{}'.format(i, rrdfile, data['dataset'],
                                            data['cf']),
                'XPORT:xxx{}:{}/{}'.format(i, data['source'], data['type']),
            ])
        proc = await Popen(
            [
                '/usr/local/bin/rrdtool',
                'xport',
                '--json',
                '--start',
                stats['start'],
                '--end',
                stats['end'],
            ] + (['--step', str(stats['step'])] if stats.get('step') else []) +
            defs,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        data, err = await proc.communicate()
        if proc.returncode != 0:
            raise CallError('rrdtool failed: {}'.format(err.decode()))
        data = json.loads(data.decode())

        # Custom about property
        data['about'] = 'Data for ' + ','.join(
            ['/'.join(i) for i in names_pair])
        return data
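
`get_data` above builds one DEF/XPORT pair per requested dataset before invoking `rrdtool xport --json`. A standalone sketch of just that argument construction (the base path, source, and dataset names are hypothetical):

RRD_PATH = '/var/db/collectd/rrd/localhost'  # hypothetical base path


def build_xport_args(data_list, start='now-1h', end='now', step=10):
    # One DEF (data source definition) and one XPORT (output column) per dataset.
    defs = []
    for i, data in enumerate(data_list):
        rrdfile = f"{RRD_PATH}/{data['source']}/{data['type']}.rrd"
        defs += [
            f"DEF:xxx{i}={rrdfile}:{data['dataset']}:{data.get('cf', 'AVERAGE')}",
            f"XPORT:xxx{i}:{data['source']}/{data['type']}",
        ]
    return ['rrdtool', 'xport', '--json', '--start', start, '--end', end,
            '--step', str(step)] + defs


print(build_xport_args([{'source': 'interface-em0', 'type': 'if_octets', 'dataset': 'rx'}]))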
Example #17
class FilesystemService(Service):

    @accepts(Str('path', required=True), Ref('query-filters'), Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            elif entry.is_symlink():
                etype = 'SYMLINK'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({'size': None, 'mode': None, 'uid': None, 'gid': None})
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        if os.path.exists(os.path.join(path, ".windows")):
            stat["acl"] = "windows"
        elif os.path.exists(os.path.join(path, ".mac")):
            stat["acl"] = "mac"
        else:
            stat["acl"] = "unix"

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be base64-encoded file content.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get contents of a file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f, job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        Reverse process converts the constant's value to a dictionary
        using a bitwise operation.
        """
        perm = 0
        for k, v, in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v, in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        ACL is trivial if it can be fully expressed as a file mode without losing
        any access rules. This is intended to be used as a check before allowing
        users to chmod() through the webui
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)
        a = acl.ACL(file=path)
        return a.is_trivial

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path.
        Simplified returns a shortened form of the ACL permset and flags
        - TRAVERSE = sufficient rights to traverse a directory, but not read contents.
        - READ = sufficient rights to traverse a directory, and read file contents.
        - MODIFY = sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.
        - FULL_CONTROL = all permissions.
        - OTHER = does not fit into any of the above categories without losing information.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(ace['perms']) == 'NOPERMS':
                    self.logger.debug('detected hidden ace')
                    continue
                advanced_acl.append(ace)
            return advanced_acl

        if simplified:
            simple_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': {'BASIC': self.__convert_to_basic_permset(entry['perms'])},
                    'flags': {'BASIC': self.__convert_to_basic_flagset(entry['flags'])},
                }
                if ace['tag'] == 'everyone@' and ace['perms']['BASIC'] == 'NOPERMS':
                    continue
                simple_acl.append(ace)

            return simple_acl

    @accepts(
        Str('path'),
        List(
            'dacl',
            items=[
                Dict(
                    'aclentry',
                    Str('tag', enum=['owner@', 'group@', 'everyone@', 'USER', 'GROUP']),
                    Int('id', null=True),
                    Str('type', enum=['ALLOW', 'DENY']),
                    Dict(
                        'perms',
                        Bool('READ_DATA'),
                        Bool('WRITE_DATA'),
                        Bool('APPEND_DATA'),
                        Bool('READ_NAMED_ATTRS'),
                        Bool('WRITE_NAMED_ATTRS'),
                        Bool('EXECUTE'),
                        Bool('DELETE_CHILD'),
                        Bool('READ_ATTRIBUTES'),
                        Bool('WRITE_ATTRIBUTES'),
                        Bool('DELETE'),
                        Bool('READ_ACL'),
                        Bool('WRITE_ACL'),
                        Bool('WRITE_OWNER'),
                        Bool('SYNCHRONIZE'),
                        Str('BASIC', enum=['FULL_CONTROL', 'MODIFY', 'READ', 'OTHER']),
                    ),
                    Dict(
                        'flags',
                        Bool('FILE_INHERIT'),
                        Bool('DIRECTORY_INHERIT'),
                        Bool('NO_PROPAGATE_INHERIT'),
                        Bool('INHERIT_ONLY'),
                        Bool('INHERITED'),
                        Str('BASIC', enum=['INHERIT', 'NOINHERIT', 'OTHER']),
                    ),
                )
            ],
            default=[]
        ),
        Dict(
            'options',
            Bool('stripacl', default=False),
            Bool('recursive', default=False),
            Bool('traverse', default=False),
        )
    )
    @job(lock=lambda args: f'setacl:{args[0]}')
    def setacl(self, job, path, dacl, options):
        """
        Set ACL of a given path. Takes the following parameters:
        :path: realpath or relative path. We make a subsequent realpath call to resolve it.
        :dacl: Accept a "simplified" ACL here or a full ACL. If the simplified ACL
        contains ACE perms or flags that are "SPECIAL", then raise a validation error.
        :recursive: apply the ACL recursively
        :traverse: traverse filesystem boundaries (ZFS datasets)
        :stripacl: convert ACL to trivial. ACL is trivial if it can be expressed as a file mode without
        losing any access rules.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        if dacl and options['stripacl']:
            raise CallError('Setting ACL and stripping ACL are not permitted simultaneously.', errno.EINVAL)

        if options['stripacl']:
            a = acl.ACL(file=path)
            a.strip()
            a.apply(path)
        else:
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                if entry['perms'].get('BASIC') == 'OTHER' or entry['flags'].get('BASIC') == 'OTHER':
                    raise CallError('Unable to apply simplified ACL due to OTHER entry. Use full ACL.', errno.EINVAL)
                ace = {
                    'tag': (acl.ACLWho(entry['tag'])).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': self.__convert_to_adv_permset(entry['perms']['BASIC']) if 'BASIC' in entry['perms'] else entry['perms'],
                    'flags': self.__convert_to_adv_flagset(entry['flags']['BASIC']) if 'BASIC' in entry['flags'] else entry['flags'],
                }
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                cleaned_acl.append(ace)
            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(path)

        if not options['recursive']:
            self.logger.debug('exiting early on non-recursive task')
            return True

        # Recursive application is handed off to the winacl helper; per the
        # 'traverse' option documented above, '-rx' also crosses ZFS dataset
        # boundaries while '-r' stays within the current dataset.
        winacl = subprocess.run([
            '/usr/local/bin/winacl',
            '-a', 'clone',
            f"{'-rx' if options['traverse'] else '-r'}",
            '-p', path], check=False, capture_output=True
        )
        if winacl.returncode != 0:
            raise CallError(f"Failed to recursively apply ACL: {winacl.stderr.decode()}")

        return True
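
A minimal invocation sketch for the getacl/setacl pair above, assuming the stock middlewared websocket client (middlewared.client.Client) is reachable and that /mnt/tank/share is a placeholder test path; the simplified BASIC permset/flagset values are expanded by the converter helpers shown earlier:

# Hedged usage sketch (not part of the service code above); Client(), call()
# and the job=True keyword are assumed to behave as in stock middlewared,
# and the path and ACL contents are placeholders.
from middlewared.client import Client

simple_dacl = [{
    'tag': 'owner@',                      # special tags require 'id': None
    'id': None,
    'type': 'ALLOW',
    'perms': {'BASIC': 'FULL_CONTROL'},
    'flags': {'BASIC': 'INHERIT'},
}]

with Client() as c:
    print(c.call('filesystem.getacl', '/mnt/tank/share', True))
    # job=True blocks until the setacl job above completes
    c.call('filesystem.setacl', '/mnt/tank/share', simple_dacl,
           {'recursive': True, 'traverse': False}, job=True)
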
Example #18
0
File: afp.py Project: bluecodecat/freenas
class SharingAFPService(CRUDService):
    class Config:
        namespace = 'sharing.afp'
        datastore = 'sharing.afp_share'
        datastore_prefix = 'afp_'
        datastore_extend = 'sharing.afp.extend'

    @accepts(
        Dict('sharingafp_create',
             Str('path'),
             Bool('home', default=False),
             Str('name'),
             Str('comment'),
             List('allow'),
             List('deny'),
             List('ro'),
             List('rw'),
             Bool('timemachine', default=False),
             Int('timemachine_quota', default=0),
             Bool('nodev', default=False),
             Bool('nostat', default=False),
             Bool('upriv', default=True),
             UnixPerm('fperm', default='644'),
             UnixPerm('dperm', default='755'),
             UnixPerm('umask', default='000'),
             List('hostsallow', items=[IPAddr('ip', cidr=True)]),
             List('hostsdeny', items=[IPAddr('ip', cidr=True)]),
             Str('auxparams'),
             register=True))
    async def do_create(self, data):
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingafp_create.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self.middleware.call('service.reload', 'afp')

        return data

    @accepts(Int('id'),
             Patch('sharingafp_create', 'sharingafp_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        path = data['path']

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               "sharingafp_update.path", path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self.middleware.call('service.reload', 'afp')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        await self.home_exists(data['home'], schema_name, verrors, old)

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')

    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        home = data['home']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if not name:
            if home:
                name = 'Homes'
            else:
                name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore, path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['allow'] = data['allow'].split()
        data['deny'] = data['deny'].split()
        data['ro'] = data['ro'].split()
        data['rw'] = data['rw'].split()
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['allow'] = ' '.join(data['allow'])
        data['deny'] = ' '.join(data['deny'])
        data['ro'] = ' '.join(data['ro'])
        data['rw'] = ' '.join(data['rw'])
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])

        return data
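
The compress/extend pair above is a plain serialization round trip: list-valued share fields are stored as space-separated strings in the datastore and split back into lists on read. A standalone sketch of the same mapping (field names taken from the schema above; no middleware involved):

# Illustrative round trip mirroring SharingAFPService.compress()/extend().
LIST_FIELDS = ('allow', 'deny', 'ro', 'rw', 'hostsallow', 'hostsdeny')

def compress(share):
    # lists become space-separated strings for storage
    return {k: ' '.join(v) if k in LIST_FIELDS else v for k, v in share.items()}

def extend(row):
    # the datastore_extend hook reverses the mapping on read
    return {k: v.split() if k in LIST_FIELDS else v for k, v in row.items()}

row = compress({'name': 'media', 'rw': ['alice', 'bob'], 'hostsallow': ['192.168.0.0/24']})
print(row['rw'])            # 'alice bob'
print(extend(row)['rw'])    # ['alice', 'bob']
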
Example #19
0
class ChartReleaseService(Service):
    class Config:
        namespace = 'chart.release'

    @accepts()
    @returns(Dict(*[Str(r.name, enum=[r.name])
                    for r in SCALEABLE_RESOURCES], ))
    async def scaleable_resources(self):
        """
        Returns choices for types of workloads which can be scaled up/down.
        """
        return {r.name: r.name for r in SCALEABLE_RESOURCES}

    @accepts(Str('release_name'),
             Dict(
                 'scale_options',
                 Int('replica_count', required=True,
                     validators=[Range(min=0)]),
             ))
    @returns(
        Dict(
            'scale_chart_release',
            Dict('before_scale',
                 *[
                     Dict(r.value, additional_attrs=True)
                     for r in SCALEABLE_RESOURCES
                 ],
                 required=True),
            Dict('after_scale',
                 *[
                     Dict(r.value, additional_attrs=True)
                     for r in SCALEABLE_RESOURCES
                 ],
                 required=True),
        ))
    @job(lock=lambda args: f'{args[0]}_chart_release_scale')
    async def scale(self, job, release_name, options):
        """
        Scale the `release_name` chart release to the specified `scale_options.replica_count`.

        This will scale deployments/statefulsets to the specified replica count.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call(
            'chart.release.query', [['id', '=', release_name]], {
                'get': True,
                'extra': {
                    'retrieve_resources': True,
                    'retrieve_locked_paths': True
                },
            })
        if options['replica_count']:
            # This means we have a number higher than 0 - we would like to make sure in this case that we
            # are not going to start an app which might be consuming a locked path
            if release['resources']['locked_host_paths']:
                raise CallError(
                    f'{release_name!r} cannot be started as it is consuming the following host path(s) '
                    f'which are locked: {", ".join(release["resources"]["locked_host_paths"])}'
                )

        resources = release['resources']
        replica_counts = await self.get_replica_count_for_resources(resources)
        job.set_progress(
            20,
            f'Scaling workload(s) to {options["replica_count"]!r} replica(s)')
        try:
            await self.scale_release_internal(resources,
                                              options['replica_count'])
        except Exception:
            # This is a best effort to get relevant workloads back to the replica count they were at before
            await self.scale_release_internal(resources,
                                              replica_counts=replica_counts)
            raise
        else:
            desired_pods_count = sum(
                len(replica_counts[r.value]) * options['replica_count']
                for r in SCALEABLE_RESOURCES)
            job.set_progress(
                40,
                f'Waiting for pods to be scaled to {desired_pods_count!r} replica(s)'
            )
            while await self.middleware.call('k8s.pod.query', [
                ['metadata.namespace', '=', release['namespace']],
                ['status.phase', 'in', ['Running', 'Pending']],
            ], {'count': True}) != desired_pods_count:
                await asyncio.sleep(5)

        job.set_progress(
            100,
            f'Scaled workload(s) successfully to {options["replica_count"]!r} replica(s)'
        )

        return {
            'before_scale':
            replica_counts,
            'after_scale':
            await self.get_replica_count_for_resources(
                (await self.middleware.call('chart.release.query',
                                            [['id', '=', release_name]], {
                                                'get': True,
                                                'extra': {
                                                    'retrieve_resources': True
                                                }
                                            }))['resources'])
        }

    @private
    async def get_replica_count_for_resources(self, resources):
        replica_counts = {r.value: {} for r in SCALEABLE_RESOURCES}
        for resource in SCALEABLE_RESOURCES:
            for workload in resources[resource.value]:
                replica_counts[resource.value][
                    workload['metadata']['name']] = {
                        'replicas': workload['spec']['replicas'],
                    }

        return replica_counts

    @private
    async def scale_release_internal(self,
                                     resources,
                                     replicas=None,
                                     replica_counts=None,
                                     resource_check=False):
        if replicas is not None and replica_counts:
            raise CallError(
                'Only one of "replicas" or "replica_counts" should be specified'
            )
        elif replicas is None and not replica_counts:
            raise CallError(
                'Either one of "replicas" or "replica_counts" must be specified'
            )

        assert bool(resources or replica_counts) is True

        replica_counts = replica_counts or {
            r.value: {}
            for r in SCALEABLE_RESOURCES
        }
        if resource_check:
            resources_data = {
                r.name.lower(): {
                    w['metadata']['name']
                    for w in await self.middleware.call(
                        f'k8s.{r.name.lower()}.query')
                }
                for r in SCALEABLE_RESOURCES
            }

        for resource in SCALEABLE_RESOURCES:
            for workload in resources[resource.value]:
                replica_count = replica_counts[resource.value].get(
                    workload['metadata']['name'],
                    {}).get('replicas') or replicas

                if resource_check:
                    if workload['metadata']['name'] not in resources_data[
                            resource.name.lower()]:
                        continue

                await self.middleware.call(
                    f'k8s.{resource.name.lower()}.update',
                    workload['metadata']['name'], {
                        'namespace': workload['metadata']['namespace'],
                        'body': {
                            'spec': {
                                'replicas': replica_count,
                            }
                        }
                    })

    @accepts(
        Str('release_name'),
        List(
            'workloads',
            items=[
                Dict(
                    'scale_workload',
                    Int('replica_count', required=True),
                    Str('type',
                        enum=[r.name for r in SCALEABLE_RESOURCES],
                        required=True),
                    Str('name', required=True),
                )
            ],
            empty=False,
        ),
    )
    @returns()
    async def scale_workloads(self, release_name, workloads):
        """
        Scale workloads in a chart release to specified `replica_count`.
        """
        release = await self.middleware.call('chart.release.query',
                                             [['id', '=', release_name]], {
                                                 'get': True,
                                                 'extra': {
                                                     'retrieve_resources': True
                                                 }
                                             })

        not_found = {}
        scale_resources = {r.name: [] for r in SCALEABLE_RESOURCES}
        to_scale_resources = defaultdict(dict)

        for workload in workloads:
            to_scale_resources[workload['type']][workload['name']] = workload

        for scaleable_resource in SCALEABLE_RESOURCES:
            to_scale = to_scale_resources[scaleable_resource.name]
            if not to_scale:
                continue

            for resource in map(
                    lambda r: r['metadata']['name'], release['resources']
                [f'{scaleable_resource.name.lower()}s']):
                if resource in to_scale:
                    scale_resources[scaleable_resource.name].append(
                        to_scale[resource])
                    to_scale.pop(resource)

            not_found.update(to_scale)

        if not_found:
            raise CallError(
                f'Unable to find {", ".join(not_found)} workload(s) for {release_name} release',
                errno=errno.ENOENT)

        for resource_type in scale_resources:
            for workload in scale_resources[resource_type]:
                await self.middleware.call(
                    f'k8s.{resource_type.lower()}.update', workload['name'], {
                        'namespace': release['namespace'],
                        'body': {
                            'spec': {
                                'replicas': workload['replica_count']
                            }
                        },
                    })

    @private
    async def wait_for_pods_to_terminate(self, namespace, extra_filters=None):
        # wait for release to uninstall properly, helm right now does not support a flag for this but
        # a feature request is open in the community https://github.com/helm/helm/issues/2378
        while await self.middleware.call('k8s.pod.query', [
            ['metadata.namespace', '=', namespace],
            ['status.phase', 'in', ['Running', 'Pending']],
        ] + (extra_filters or [])):
            await asyncio.sleep(5)

    @private
    async def get_workload_to_pod_mapping(self, namespace):
        mapping = {'replicaset': defaultdict(dict), 'pod': defaultdict(dict)}
        for key in ('replicaset', 'pod'):
            for r in await self.middleware.call(f'k8s.{key}.query', [
                ['metadata.namespace', '=', namespace],
                ['metadata', 'rin', 'owner_references'],
            ], {'select': ['metadata']}):
                for owner_reference in filter(
                        lambda o: o.get('uid'),
                        r['metadata']['owner_references'] or []):
                    mapping[key][owner_reference['uid']][r['metadata']['uid']] = r

        pod_mapping = defaultdict(list)
        for parent, replicasets in mapping['replicaset'].items():
            for replicaset in map(lambda r: mapping['replicaset'][parent][r],
                                  replicasets):
                if replicaset['metadata']['uid'] not in mapping['pod']:
                    continue
                pod_mapping[parent].extend([
                    p['metadata']['name'] for p in mapping['pod'][
                        replicaset['metadata']['uid']].values()
                ])

        return pod_mapping

    @private
    async def scale_down_resources_consuming_locked_paths(self):
        args = [[r['id'], {
            'replica_count': 0
        }] for r in await self.middleware.call(
            'chart.release.query', [['status', '!=', 'STOPPED']], {
                'extra': {
                    'retrieve_resources': True,
                    'retrieve_locked_paths': True,
                }
            }) if r['resources']['locked_host_paths']]
        if args:
            await self.middleware.call('core.bulk', 'chart.release.scale',
                                       args)
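
For context, a hedged sketch of how the two scaling entry points above might be invoked through the middleware client; the release name, workload name, and the DEPLOYMENT type value (inferred from the SCALEABLE_RESOURCES enum referenced in the code) are placeholders:

# Hypothetical invocation sketch; assumes a running SCALE middleware with a
# 'plex' chart release and the stock middlewared client package.
from middlewared.client import Client

with Client() as c:
    # scale() is a job: stop every scalable workload of the release
    c.call('chart.release.scale', 'plex', {'replica_count': 0}, job=True)

    # scale_workloads() targets individual workloads by type and name
    c.call('chart.release.scale_workloads', 'plex', [
        {'type': 'DEPLOYMENT', 'name': 'plex', 'replica_count': 1},
    ])
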
Example #20
0
File: filesystem.py Project: ghos/freenas
class FilesystemService(Service):
    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_symlink():
                etype = 'SYMLINK'
            elif entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': not self.acl_is_trivial(data["realpath"]),
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = not self.middleware.call_sync('filesystem.acl_is_trivial', path)

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=2048000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be base64-encoded file content.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get the contents of a file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(
                options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        Reverse process converts the constant's value to a dictionary
        using a bitwise operation.
        """
        perm = 0
        for k, v, in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v, in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset

    def _winacl(self, path, action, uid, gid, options):
        chroot_dir = os.path.dirname(path)
        target = os.path.basename(path)
        winacl = subprocess.run(
            [
                '/usr/local/bin/winacl',
                '-a', action,
                '-O', str(uid), '-G', str(gid),
                '-rx' if options['traverse'] else '-r',
                '-c', chroot_dir,
                '-p', target,
            ],
            check=False,
            capture_output=True,
        )
        if winacl.returncode != 0:
            raise CallError(
                f"Winacl {action} on path {path} failed with error: [{winacl.stderr.decode().strip()}]"
            )

    def _common_perm_path_validate(self, path):
        if not os.path.exists(path):
            raise CallError(f"Path not found: {path}", errno.ENOENT)

        if not os.path.realpath(path).startswith('/mnt/'):
            raise CallError(
                f"Changing permissions on paths outside of /mnt is not permitted: {path}",
                errno.EPERM)

        if os.path.realpath(path) in [
                x['path'] for x in self.middleware.call_sync('pool.query')
        ]:
            raise CallError(
                f"Changing permissions of root level dataset is not permitted: {path}",
                errno.EPERM)

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules, or if the path does not support NFSv4 ACLs (for example
        a path on a tmpfs filesystem).
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        # pathconf name 64 corresponds to FreeBSD's _PC_ACL_NFS4; a falsy result
        # means the underlying filesystem has no NFSv4 ACL support (e.g. tmpfs).
        has_nfs4_acl_support = os.pathconf(path, 64)
        if not has_nfs4_acl_support:
            return True

        return acl.ACL(file=path).is_trivial

    @accepts(
        Dict(
            'filesystem_ownership', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict('options', Bool('recursive', default=False),
                 Bool('traverse', default=False))))
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        job.set_progress(0, 'Preparing to change owner.')

        self._common_perm_path_validate(data['path'])

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            job.set_progress(100, 'Finished changing owner.')
            os.chown(data['path'], uid, gid)
        else:
            job.set_progress(10,
                             f'Recursively changing owner of {data["path"]}.')
            self._winacl(data['path'], 'chown', uid, gid, options)
            job.set_progress(100, 'Finished changing owner.')

    @accepts(
        Dict(
            'filesystem_permission', Str('path', required=True),
            UnixPerm('mode', null=True), Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )))
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        self._common_perm_path_validate(data['path'])

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial',
                                                   data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
                errno.EINVAL)

        if mode is not None:
            mode = int(mode, 8)

        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        if uid != -1 or gid != -1:
            os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        action = 'clone' if mode else 'strip'
        job.set_progress(
            10, f'Recursively setting permissions on {data["path"]}.')
        self._winacl(data['path'], action, uid, gid, options)
        job.set_progress(100, 'Finished setting permissions.')

    @accepts()
    async def default_acl_choices(self):
        """
        Get list of default ACL types.
        """
        acl_choices = []
        for x in ACLDefault:
            if x.value['visible']:
                acl_choices.append(x.name)

        return acl_choices

    @accepts(Str('acl_type', default='OPEN',
                 enum=[x.name for x in ACLDefault]))
    async def get_default_acl(self, acl_type):
        """
        Returns a default ACL depending on the usage specified by `acl_type`.
        If an admin group is defined, then an entry granting it full control will
        be placed at the top of the ACL.
        """
        acl = []
        admin_group = (await self.middleware.call('smb.config'))['admin_group']
        if acl_type == 'HOME' and (await self.middleware.call(
                'activedirectory.get_state')) == 'HEALTHY':
            acl_type = 'DOMAIN_HOME'
        if admin_group:
            acl.append({
                'tag': 'GROUP',
                'id': (await self.middleware.call('dscache.get_uncached_group',
                                                  admin_group))['gr_gid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            })
        acl.extend((ACLDefault[acl_type].value)['acl'])

        return acl

    def _is_inheritable(self, flags):
        """
        Takes ACE flags and return True if any inheritance bits are set.
        """
        inheritance_flags = [
            'FILE_INHERIT', 'DIRECTORY_INHERIT', 'NO_PROPAGATE_INHERIT',
            'INHERIT_ONLY'
        ]
        for i in inheritance_flags:
            if flags.get(i):
                return True

        return False

    @private
    def canonicalize_acl_order(self, acl):
        """
        Convert flags to advanced, then separate the ACL into two lists. One for ACEs that have been inherited,
        one for ACEs that have not been inherited. Non-inherited ACEs take precedence
        and so they are placed first in the finalized combined list. Within each list, the
        ACEs are ordered according to the following:

        1) Deny ACEs that apply to the object itself (NOINHERIT)

        2) Deny ACEs that apply to a subobject of the object (INHERIT)

        3) Allow ACEs that apply to the object itself (NOINHERIT)

        4) Allow ACEs that apply to a subobject of the object (INHERIT)

        See http://docs.microsoft.com/en-us/windows/desktop/secauthz/order-of-aces-in-a-dacl

        The "INHERITED" bit is stripped in filesystem.getacl when generating a BASIC flag type.
        It is best practice to use a non-simplified ACL for canonicalization.
        """
        inherited_aces = []
        final_acl = []
        non_inherited_aces = []
        for entry in acl:
            entry['flags'] = self.__convert_to_adv_flagset(
                entry['flags']
                ['BASIC']) if 'BASIC' in entry['flags'] else entry['flags']
            if entry['flags'].get('INHERITED'):
                inherited_aces.append(entry)
            else:
                non_inherited_aces.append(entry)

        if inherited_aces:
            inherited_aces = sorted(
                inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        if non_inherited_aces:
            non_inherited_aces = sorted(
                non_inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        final_acl = non_inherited_aces + inherited_aces
        return final_acl

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path.

        Simplified returns a shortened form of the ACL permset and flags

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permission types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        stat = os.stat(path)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    continue

                advanced_acl.append(ace)

            return {
                'uid': stat.st_uid,
                'gid': stat.st_gid,
                'acl': advanced_acl
            }

        if simplified:
            simple_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': {
                        'BASIC':
                        self.__convert_to_basic_permset(entry['perms'])
                    },
                    'flags': {
                        'BASIC':
                        self.__convert_to_basic_flagset(entry['flags'])
                    },
                }
                if ace['tag'] == 'everyone@' and ace['perms'][
                        'BASIC'] == 'NOPERMS':
                    continue

                for key in ['perms', 'flags']:
                    if ace[key]['BASIC'] == 'OTHER':
                        ace[key] = entry[key]

                simple_acl.append(ace)

            return {'uid': stat.st_uid, 'gid': stat.st_gid, 'acl': simple_acl}

    @accepts(
        Dict(
            'filesystem_acl', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            List('dacl',
                 items=[
                     Dict(
                         'aclentry',
                         Str('tag',
                             enum=[
                                 'owner@', 'group@', 'everyone@', 'USER',
                                 'GROUP'
                             ]),
                         Int('id', null=True),
                         Str('type', enum=['ALLOW', 'DENY']),
                         Dict(
                             'perms',
                             Bool('READ_DATA'),
                             Bool('WRITE_DATA'),
                             Bool('APPEND_DATA'),
                             Bool('READ_NAMED_ATTRS'),
                             Bool('WRITE_NAMED_ATTRS'),
                             Bool('EXECUTE'),
                             Bool('DELETE_CHILD'),
                             Bool('READ_ATTRIBUTES'),
                             Bool('WRITE_ATTRIBUTES'),
                             Bool('DELETE'),
                             Bool('READ_ACL'),
                             Bool('WRITE_ACL'),
                             Bool('WRITE_OWNER'),
                             Bool('SYNCHRONIZE'),
                             Str('BASIC',
                                 enum=[
                                     'FULL_CONTROL', 'MODIFY', 'READ',
                                     'TRAVERSE'
                                 ]),
                         ),
                         Dict(
                             'flags',
                             Bool('FILE_INHERIT'),
                             Bool('DIRECTORY_INHERIT'),
                             Bool('NO_PROPAGATE_INHERIT'),
                             Bool('INHERIT_ONLY'),
                             Bool('INHERITED'),
                             Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                         ),
                     )
                 ],
                 default=[]),
            Dict('options', Bool('stripacl', default=False),
                 Bool('recursive', default=False),
                 Bool('traverse', default=False),
                 Bool('canonicalize', default=True))))
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` "simplified" ACL here or a full ACL.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert ACL to trivial. ACL is trivial if it can be expressed as a file mode without
        losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL)

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        job.set_progress(0, 'Preparing to set acl.')
        options = data['options']
        dacl = data.get('dacl', [])

        self._common_perm_path_validate(data['path'])

        if dacl and options['stripacl']:
            raise CallError(
                'Setting ACL and stripping ACL are not permitted simultaneously.',
                errno.EINVAL)

        uid = -1 if data.get('uid', None) is None else data['uid']
        gid = -1 if data.get('gid', None) is None else data['gid']
        if options['stripacl']:
            a = acl.ACL(file=data['path'])
            a.strip()
            a.apply(data['path'])
        else:
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                ace = {
                    'tag': (acl.ACLWho(entry['tag'])).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': (self.__convert_to_adv_permset(entry['perms']['BASIC'])
                              if 'BASIC' in entry['perms'] else entry['perms']),
                    'flags': (self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                              if 'BASIC' in entry['flags'] else entry['flags']),
                }
                if ace['flags'].get('INHERIT_ONLY') and not ace['flags'].get(
                        'DIRECTORY_INHERIT', False) and not ace['flags'].get(
                            'FILE_INHERIT', False):
                    raise CallError(
                        'Invalid flag combination. DIRECTORY_INHERIT or FILE_INHERIT must be set if INHERIT_ONLY is set.',
                        errno.EINVAL)
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                cleaned_acl.append(ace)
            if options['canonicalize']:
                cleaned_acl = self.canonicalize_acl_order(cleaned_acl)

            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(data['path'])

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished setting ACL.')
            return

        job.set_progress(10, f'Recursively setting ACL on {data["path"]}.')
        self._winacl(data['path'], 'clone', uid, gid, options)
        job.set_progress(100, 'Finished setting ACL.')
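
The ordering applied inside canonicalize_acl_order (within each of the non-inherited and inherited lists) follows directly from the sort key (type == 'ALLOW', is_inheritable): False sorts before True, so DENY entries precede ALLOW entries, and within each type non-inheriting entries precede inheriting ones. A toy reproduction of that ordering on plain dictionaries:

# Toy sketch of the canonical ACE ordering; the entries are illustrative only.
def is_inheritable(flags):
    return any(flags.get(f) for f in (
        'FILE_INHERIT', 'DIRECTORY_INHERIT', 'NO_PROPAGATE_INHERIT', 'INHERIT_ONLY'))

aces = [
    {'who': 'group@',    'type': 'ALLOW', 'flags': {'FILE_INHERIT': True}},
    {'who': 'owner@',    'type': 'ALLOW', 'flags': {}},
    {'who': 'everyone@', 'type': 'DENY',  'flags': {'DIRECTORY_INHERIT': True}},
    {'who': 'USER',      'type': 'DENY',  'flags': {}},
]

canonical = sorted(aces, key=lambda a: (a['type'] == 'ALLOW', is_inheritable(a['flags'])))
print([a['who'] for a in canonical])
# -> ['USER', 'everyone@', 'owner@', 'group@']
#    (DENY/no-inherit, DENY/inherit, ALLOW/no-inherit, ALLOW/inherit)
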
Example #21
0
File: ssh.py Project: vaibhav-rbs/freenas
class SSHService(SystemServiceService):

    class Config:
        service = "ssh"
        service_model = "ssh"
        datastore_prefix = "ssh_"

    @accepts()
    def bindiface_choices(self):
        """
        Available choices for the bindiface attribute of SSH service.
        """
        return self.middleware.call_sync('interface.choices')

    @accepts(Dict(
        'ssh_update',
        List('bindiface', items=[Str('iface')]),
        Int('tcpport', validators=[Range(min=1, max=65535)]),
        Bool('rootlogin'),
        Bool('passwordauth'),
        Bool('kerberosauth'),
        Bool('tcpfwd'),
        Bool('compression'),
        Str('sftp_log_level', enum=["", "QUIET", "FATAL", "ERROR", "INFO", "VERBOSE", "DEBUG", "DEBUG2", "DEBUG3"]),
        Str('sftp_log_facility', enum=["", "DAEMON", "USER", "AUTH", "LOCAL0", "LOCAL1", "LOCAL2", "LOCAL3", "LOCAL4",
                                       "LOCAL5", "LOCAL6", "LOCAL7"]),
        Str('options'),
        update=True
    ))
    async def do_update(self, data):
        """
        Update settings of SSH daemon service.

        If `bindiface` is empty, the service will listen on all available addresses.

        .. examples(websocket)::

          Make sshd listen only to igb0 interface.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "ssh.update",
                "params": [{
                    "bindiface": ["igb0"]
                }]
            }

        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        if new['bindiface']:
            verrors = ValidationErrors()
            iface_choices = await self.middleware.call('ssh.bindiface_choices')
            invalid_ifaces = list(filter(lambda x: x not in iface_choices, new['bindiface']))
            if invalid_ifaces:
                verrors.add(
                    'ssh_update.bindiface',
                    f'The following interfaces are not valid: {", ".join(invalid_ifaces)}',
                )
            verrors.check()

        await self._update_service(old, new)

        keyfile = "/usr/local/etc/ssh/ssh_host_ecdsa_key.pub"
        if os.path.exists(keyfile):
            with open(keyfile, "rb") as f:
                pubkey = f.read().strip().split(None, 3)[1]
            decoded_key = base64.b64decode(pubkey)
            key_digest = hashlib.sha256(decoded_key).digest()
            ssh_fingerprint = (b"SHA256:" + base64.b64encode(key_digest).replace(b"=", b"")).decode("utf-8")

            syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
            syslog.syslog(syslog.LOG_ERR, 'ECDSA Fingerprint of the SSH KEY: ' + ssh_fingerprint)
            syslog.closelog()

        return new
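
A minimal usage sketch (not part of the original listing): driving the `ssh.update` method above through the middleware websocket client. It assumes `middlewared.client.Client` is importable on the host and that an `igb0` interface exists.

from middlewared.client import Client

with Client() as c:
    # Bind sshd to a single interface and move it off the default port;
    # the payload keys mirror the 'ssh_update' schema accepted above.
    updated = c.call('ssh.update', {
        'bindiface': ['igb0'],
        'tcpport': 2222,
        'passwordauth': False,
    })
    print(updated['tcpport'])
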
Example #22
0
class ZFSPoolService(Service):
    class Config:
        namespace = 'zfs.pool'
        private = True

    @filterable
    def query(self, filters, options):
        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoid getting all pools
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                try:
                    pools = [zfs.get(filters[0][2]).__getstate__()]
                except libzfs.ZFSException:
                    pools = []
            else:
                pools = [i.__getstate__() for i in zfs.pools]
        return filter_list(pools, filters, options)

    @accepts(Str('pool'))
    async def get_disks(self, name):
        try:
            with libzfs.ZFS() as zfs:
                disks = list(zfs.get(name).disks)
        except libzfs.ZFSException as e:
            raise CallError(str(e), errno.ENOENT)

        await self.middleware.run_in_thread(geom.scan)
        labelclass = geom.class_by_name('LABEL')
        for absdev in disks:
            dev = absdev.replace('/dev/', '').replace('.eli', '')
            find = labelclass.xml.findall(
                f".//provider[name='{dev}']/../consumer/provider")
            name = None
            if find:
                name = geom.provider_by_id(find[0].get('ref')).geom.name
            else:
                g = geom.geom_by_name('DEV', dev)
                if g:
                    name = g.consumer.provider.geom.name

            if name and geom.geom_by_name('DISK', name):
                yield name
            else:
                self.logger.debug(f'Could not find disk for {dev}')

    @accepts(
        Str('name'),
        List('new'),
        List('existing',
             items=[
                 Dict(
                     'attachvdev',
                     Str('target'),
                     Str('type', enum=['DISK']),
                     Str('path'),
                 ),
             ]),
    )
    @job()
    def extend(self, job, name, new=None, existing=None):
        """
        Extend a zfs pool `name` with `new` vdevs or attach to `existing` vdevs.
        """

        if new is None and existing is None:
            raise CallError('New or existing vdevs must be provided',
                            errno.EINVAL)

        if new:
            raise CallError('Adding new vdev is not implemented yet')

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)

                # Make sure we can find all target vdevs
                for i in (existing or []):
                    target = find_vdev(pool, i['target'])
                    if target is None:
                        raise CallError(f"Failed to find vdev for {i['target']}",
                                        errno.EINVAL)
                    i['target'] = target

                for i in (existing or []):
                    newvdev = libzfs.ZFSVdev(zfs, i['type'].lower())
                    newvdev.path = i['path']
                    i['target'].attach(newvdev)

        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('pool'), Str('label'))
    def detach(self, name, label):
        """
        Detach device `label` from the pool `pool`.
        """
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)
                target.detach()
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('pool'), Str('label'), Str('dev'))
    def replace(self, name, label, dev):
        """
        Replace device `label` with `dev` in pool `name`.
        """
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)

                newvdev = libzfs.ZFSVdev(zfs, 'disk')
                newvdev.path = f'/dev/{dev}'
                target.replace(newvdev)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('name'))
    @job(lock=lambda i: i[0])
    def scrub(self, job, name):
        """
        Start a scrub on pool `name`.
        """
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                pool.start_scrub()
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

        def watch():
            while True:
                with libzfs.ZFS() as zfs:
                    scrub = zfs.get(name).scrub.__getstate__()
                if scrub['function'] != 'SCRUB':
                    break

                if scrub['state'] == 'FINISHED':
                    job.set_progress(100, 'Scrub finished')
                    break

                if scrub['state'] == 'CANCELED':
                    break

                if scrub['state'] == 'SCANNING':
                    job.set_progress(scrub['percentage'], 'Scrubbing')
                time.sleep(1)

        t = threading.Thread(target=watch, daemon=True)
        t.start()
        t.join()

    @accepts()
    def find_import(self):
        with libzfs.ZFS() as zfs:
            return [i.__getstate__() for i in zfs.find_import()]
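
Because this service is declared with `private = True`, it is normally reached from other middleware plugins rather than from the public API. Below is a hedged sketch of such an internal call; the `middleware` handle and the pool name `tank` are assumptions for illustration.

async def scrub_if_present(middleware, pool_name='tank'):
    # Uses the special-cased `id` filter from query() above, then starts a scrub job.
    pools = await middleware.call('zfs.pool.query', [['id', '=', pool_name]])
    if pools:
        await middleware.call('zfs.pool.scrub', pool_name)
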
Example #23
0
class PeriodicSnapshotTaskService(CRUDService):
    class Config:
        datastore = 'storage.task'
        datastore_prefix = 'task_'
        datastore_extend = 'pool.snapshottask.extend'
        datastore_extend_context = 'pool.snapshottask.extend_context'
        namespace = 'pool.snapshottask'

    @private
    async def extend_context(self, extra):
        return {
            'state': await self.middleware.call('zettarepl.get_state'),
            'vmware': await self.middleware.call('vmware.query'),
        }

    @private
    async def extend(self, data, context):
        Cron.convert_db_format_to_schedule(data, begin_end=True)

        data['vmware_sync'] = any((vmware['filesystem'] == data['dataset'] or (
            data['recursive']
            and is_child(vmware['filesystem'], data['dataset'])))
                                  for vmware in context['vmware'])

        if 'error' in context['state']:
            data['state'] = context['state']['error']
        else:
            data['state'] = context['state']['tasks'].get(
                f'periodic_snapshot_task_{data["id"]}', {
                    'state': 'PENDING',
                })

        return data

    @accepts(
        Dict('periodic_snapshot_create',
             Path('dataset', required=True),
             Bool('recursive', required=True),
             List('exclude', items=[Path('item', empty=False)], default=[]),
             Int('lifetime_value', required=True),
             Str('lifetime_unit',
                 enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR'],
                 required=True),
             Str('naming_schema',
                 required=True,
                 validators=[ReplicationSnapshotNamingSchema()]),
             Cron('schedule',
                  defaults={
                      'minute': '00',
                      'begin': '00:00',
                      'end': '23:59',
                  },
                  required=True,
                  begin_end=True),
             Bool('allow_empty', default=True),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create a Periodic Snapshot Task

        Create a Periodic Snapshot Task that will take snapshots of specified `dataset` at specified `schedule`.
        Recursive snapshots can be created if `recursive` flag is enabled. You can `exclude` specific child datasets
        from snapshot.
        Snapshots will be automatically destroyed after a certain amount of time, specified by
        `lifetime_value` and `lifetime_unit`.
        Snapshots will be named according to `naming_schema` which is a `strftime`-like template for snapshot name
        and must contain `%Y`, `%m`, `%d`, `%H` and `%M`.

        .. examples(websocket)::

          Create a recursive Periodic Snapshot Task for dataset `data/work` excluding `data/work/temp`. Snapshots
          will be created on weekdays every hour from 09:00 to 18:00 and will be stored for two weeks.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.create",
                "params": [{
                    "dataset": "data/work",
                    "recursive": true,
                    "exclude": ["data/work/temp"],
                    "lifetime_value": 2,
                    "lifetime_unit": "WEEK",
                    "naming_schema": "auto_%Y-%m-%d_%H-%M",
                    "schedule": {
                        "minute": "0",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "1,2,3,4,5",
                        "begin": "09:00",
                        "end": "18:00"
                    }
                }]
            }
        """

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_create',
                          await self._validate(data))

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data, begin_end=True)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch('periodic_snapshot_create', 'periodic_snapshot_update',
                   ('attr', {
                       'update': True
                   })))
    async def do_update(self, id, data):
        """
        Update a Periodic Snapshot Task with specific `id`

        See the documentation for `create` method for information on payload contents

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.update",
                "params": [
                    1,
                    {
                        "dataset": "data/work",
                        "recursive": true,
                        "exclude": ["data/work/temp"],
                        "lifetime_value": 2,
                        "lifetime_unit": "WEEK",
                        "naming_schema": "auto_%Y-%m-%d_%H-%M",
                        "schedule": {
                            "minute": "0",
                            "hour": "*",
                            "dom": "*",
                            "month": "*",
                            "dow": "1,2,3,4,5",
                            "begin": "09:00",
                            "end": "18:00"
                        }
                    }
                ]
            }
        """

        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('periodic_snapshot_update',
                          await self._validate(new))

        if not new['enabled']:
            for replication_task in await self.middleware.call(
                    'replication.query', [['enabled', '=', True]]):
                if any(periodic_snapshot_task['id'] == id
                       for periodic_snapshot_task in
                       replication_task['periodic_snapshot_tasks']):
                    verrors.add('periodic_snapshot_update.enabled', (
                        f'You can\'t disable this periodic snapshot task because it is bound to enabled replication '
                        f'task {replication_task["id"]!r}'))
                    break

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new, begin_end=True)

        for key in ('vmware_sync', 'state'):
            new.pop(key, None)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        await self.middleware.call('zettarepl.update_tasks')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a Periodic Snapshot Task with specific `id`

        .. examples(websocket)::

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "pool.snapshottask.delete",
                "params": [
                    1
                ]
            }
        """

        for replication_task in await self.middleware.call(
                'replication.query', [
                    ['direction', '=', 'PUSH'],
                    ['also_include_naming_schema', '=', []],
                    ['enabled', '=', True],
                ]):
            if len(replication_task['periodic_snapshot_tasks']) == 1:
                if replication_task['periodic_snapshot_tasks'][0]['id'] == id:
                    raise CallError(
                        f'You are deleting the last periodic snapshot task bound to enabled replication task '
                        f'{replication_task["name"]!r} which will break it. Please, disable that replication task '
                        f'first.', )

        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        await self.middleware.call('zettarepl.update_tasks')

        return response

    @item_method
    @accepts(Int("id"))
    async def run(self, id):
        """
        Execute a Periodic Snapshot Task of `id`.
        """
        task = await self._get_instance(id)

        if not task["enabled"]:
            raise CallError("Task is not enabled")

        await self.middleware.call("zettarepl.run_periodic_snapshot_task",
                                   task["id"])

    async def _validate(self, data):
        verrors = ValidationErrors()

        if data['dataset'] not in (
                await self.middleware.call('pool.filesystem_choices')):
            verrors.add('dataset', 'Invalid ZFS dataset')

        if not data['recursive'] and data['exclude']:
            verrors.add(
                'exclude',
                'Excluding datasets makes no sense for non-recursive periodic snapshot tasks'
            )

        for i, v in enumerate(data['exclude']):
            if not v.startswith(f'{data["dataset"]}/'):
                verrors.add(
                    f'exclude.{i}',
                    'Excluded dataset should be a child of selected dataset')

        return verrors
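
For reference, a hedged client-side sketch that mirrors the payload from the `do_create` docstring above and then triggers the task immediately. It assumes the middleware websocket client and an existing `data/work` dataset.

from middlewared.client import Client

with Client() as c:
    task = c.call('pool.snapshottask.create', {
        'dataset': 'data/work',
        'recursive': True,
        'exclude': ['data/work/temp'],
        'lifetime_value': 2,
        'lifetime_unit': 'WEEK',
        'naming_schema': 'auto_%Y-%m-%d_%H-%M',
        'schedule': {
            'minute': '0', 'hour': '*', 'dom': '*', 'month': '*',
            'dow': '1,2,3,4,5', 'begin': '09:00', 'end': '18:00',
        },
    })
    # Run the task right away instead of waiting for the schedule.
    c.call('pool.snapshottask.run', task['id'])
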
Example #24
0
class NFSService(SystemServiceService):
    class Config:
        service = "nfs"
        datastore_prefix = "nfs_srv_"
        datastore_extend = 'nfs.nfs_extend'

    @private
    def nfs_extend(self, nfs):
        nfs["userd_manage_gids"] = nfs.pop("16")
        return nfs

    @private
    def nfs_compress(self, nfs):
        nfs["16"] = nfs.pop("userd_manage_gids")
        return nfs

    @accepts(
        Dict(
            'nfs_update',
            Int('servers', validators=[Range(min=1, max=256)]),
            Bool('udp'),
            Bool('allow_nonroot'),
            Bool('v4'),
            Bool('v4_v3owner'),
            Bool('v4_krb'),
            List('bindip', items=[IPAddr('ip')]),
            Int('mountd_port',
                required=False,
                validators=[Range(min=1, max=65535)]),
            Int('rpcstatd_port',
                required=False,
                validators=[Range(min=1, max=65535)]),
            Int('rpclockd_port',
                required=False,
                validators=[Range(min=1, max=65535)]),
            Bool('userd_manage_gids'),
            Bool('mountd_log'),
            Bool('statd_lockd_log'),
        ))
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if not new["v4"] and new["v4_v3owner"]:
            verrors.add("nfs_update.v4_v3owner",
                        "This option requires enabling NFSv4")

        if new["v4_v3owner"] and new["userd_manage_gids"]:
            verrors.add(
                "nfs_update.userd_manage_gids",
                "This option is incompatible with NFSv3 ownership model for NFSv4"
            )

        if verrors:
            raise verrors

        self.nfs_compress(new)

        await self._update_service(old, new)

        self.nfs_extend(new)

        return new
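
A short, hedged sketch of how the validation in `do_update` above looks from a client's point of view (assumes the middleware websocket client).

from middlewared.client import Client

with Client() as c:
    # Accepted: v4_v3owner requires NFSv4 to be enabled.
    c.call('nfs.update', {'v4': True, 'v4_v3owner': True})

    # The following would be rejected with a validation error, because
    # v4_v3owner is only valid together with v4 (see do_update above):
    # c.call('nfs.update', {'v4': False, 'v4_v3owner': True})
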
Example #25
0
class CatalogService(Service):
    class Config:
        cli_namespace = 'app.catalog'

    @private
    def cached(self, label):
        return self.middleware.call_sync('cache.has_key', get_cache_key(label))

    @accepts(Str('label'),
             Dict(
                 'options',
                 Bool('cache', default=True),
                 Bool('cache_only', default=False),
                 Bool('retrieve_all_trains', default=True),
                 List('trains', items=[Str('train_name')]),
             ))
    @returns(
        Dict(
            'trains',
            additional_attrs=True,
            example={
                'charts': {
                    'chia': {
                        'name': 'chia',
                        'categories': ['storage', 'crypto'],
                        'app_readme': 'app readme here',
                        'location':
                        '/mnt/evo/ix-applications/catalogs/github_com_truenas_charts_git_master/charts/chia',
                        'healthy': True,
                        'healthy_error': False,
                        'latest_version': '1.2.0',
                        'latest_app_version': '1.1.6',
                        'icon_url': 'https://www.chia.net/img/chia_logo.svg',
                    }
                }
            }))
    @job(lock=lambda args: f'catalog_item_retrieval_{json.dumps(args)}',
         lock_queue_size=1)
    def items(self, job, label, options):
        """
        Retrieve item details for `label` catalog.

        `options.cache` is a boolean which when set will try to get items details for `label` catalog from cache
        if available.

        `options.cache_only` is a boolean which when set will force usage of cache only for retrieving catalog
        information. If the content for the catalog in question is not cached, no content would be returned. If
        `options.cache` is unset, this attribute has no effect.

        `options.retrieve_all_trains` is a boolean value which when set will retrieve information for all the trains
        present in the catalog (it is set by default).

        `options.trains` is a list of train name(s) which will allow selective filtering to retrieve only information
        of desired trains in a catalog. If `options.retrieve_all_trains` is set, it takes precedence over `options.trains`.
        """
        catalog = self.middleware.call_sync('catalog.get_instance', label)
        all_trains = options['retrieve_all_trains']
        cache_key = get_cache_key(label)
        cache_available = self.middleware.call_sync('cache.has_key', cache_key)
        if options['cache'] and options['cache_only'] and not cache_available:
            return {}

        if options['cache'] and cache_available:
            job.set_progress(10, 'Retrieving cached content')
            orig_data = self.middleware.call_sync('cache.get', cache_key)
            job.set_progress(60, 'Normalizing cached content')
            cached_data = {}
            for train in orig_data:
                if not all_trains and train not in options['trains']:
                    continue

                train_data = {}
                for catalog_item in orig_data[train]:
                    train_data[catalog_item] = {
                        k: v
                        for k, v in orig_data[train][catalog_item].items()
                    }

                cached_data[train] = train_data

            job.set_progress(100,
                             'Retrieved catalog item(s) details successfully')
            self.middleware.loop.call_later(
                30, functools.partial(job.set_result, None))
            return cached_data
        elif not os.path.exists(catalog['location']):
            job.set_progress(5, f'Cloning {label!r} catalog repository')
            self.middleware.call_sync('catalog.update_git_repository', catalog)

        if all_trains:
            # We can only safely say that the catalog is healthy if we retrieve data for all trains
            self.middleware.call_sync('alert.oneshot_delete',
                                      'CatalogNotHealthy', label)

        trains = self.get_trains(job, catalog, options)

        if all_trains:
            # We only update the cache if we are retrieving data for all trains of a catalog,
            # which happens when we sync catalog(s) periodically or manually.
            # We cache for 90000 seconds, giving the system an extra hour to refresh its cache
            # (which happens after 24h). This means that for a short window a user may hit a
            # case where the cached data has expired and the system is reading from disk again,
            # hence the extra hour.
            self.middleware.call_sync('cache.put', get_cache_key(label),
                                      trains, 90000)

        if label == self.middleware.call_sync(
                'catalog.official_catalog_label'):
            # Update feature map cache whenever official catalog is updated
            self.middleware.call_sync('catalog.get_feature_map', False)

        job.set_progress(
            100, f'Successfully retrieved {label!r} catalog information')
        self.middleware.loop.call_later(
            30, functools.partial(job.set_result, None))
        return trains

    @private
    def retrieve_train_names(self,
                             location,
                             all_trains=True,
                             trains_filter=None):
        train_names = []
        trains_filter = trains_filter or []
        for train in os.listdir(location):
            if (not (all_trains or train in trains_filter)
                    or not os.path.isdir(os.path.join(location, train))
                    or train.startswith('.') or train in ('library', 'docs')
                    or not VALID_TRAIN_REGEX.match(train)):
                continue
            train_names.append(train)
        return train_names

    @private
    def get_trains(self, job, catalog, options):
        # We make sure we do not dive into the library and docs folders and do not treat them as trains.
        # This allows us to use these folders for placing helm library charts and docs respectively
        trains = {'charts': {}, 'test': {}}
        location = catalog['location']
        questions_context = self.middleware.call_sync(
            'catalog.get_normalised_questions_context')
        unhealthy_apps = set()
        preferred_trains = catalog['preferred_trains']

        trains_to_traverse = self.retrieve_train_names(
            location, options['retrieve_all_trains'], options['trains'])
        # In order to calculate job progress, we need to know number of items we would be traversing
        items = {}
        for train in trains_to_traverse:
            trains[train] = {}
            items.update({
                f'{i}_{train}': train
                for i in os.listdir(os.path.join(location, train))
                if os.path.isdir(os.path.join(location, train, i))
            })

        job.set_progress(
            8,
            f'Retrieving {", ".join(trains_to_traverse)!r} train(s) information'
        )

        total_items = len(items)
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=(5 if total_items > 10 else 2)) as exc:
            for index, result in enumerate(
                    zip(
                        items,
                        exc.map(functools.partial(item_details, items,
                                                  location, questions_context),
                                items,
                                chunksize=(10 if total_items > 10 else 5)))):
                item_key = result[0]
                item_info = result[1]
                train = items[item_key]
                item = item_key.removesuffix(f'_{train}')
                job.set_progress(
                    int((index / total_items) * 80) + 10,
                    f'Retrieved information of {item!r} item from {train!r} train'
                )
                trains[train][item] = item_info
                if train in preferred_trains and not trains[train][item][
                        'healthy']:
                    unhealthy_apps.add(f'{item} ({train} train)')

        if unhealthy_apps:
            self.middleware.call_sync('alert.oneshot_create',
                                      'CatalogNotHealthy', {
                                          'catalog': catalog['id'],
                                          'apps': ', '.join(unhealthy_apps)
                                      })

        job.set_progress(
            90,
            f'Retrieved {", ".join(trains_to_traverse)} train(s) information')
        return trains

    @private
    def item_version_details(self, version_path, questions_context=None):
        if not questions_context:
            questions_context = self.middleware.call_sync(
                'catalog.get_normalised_questions_context')
        return get_item_version_details(version_path, questions_context)

    @private
    async def get_normalised_questions_context(self):
        k8s_started = await self.middleware.call(
            'kubernetes.validate_k8s_setup', False)
        return {
            'nic_choices':
            await self.middleware.call('chart.release.nic_choices'),
            'gpus':
            await self.middleware.call('k8s.gpu.available_gpus')
            if k8s_started else {},
            'timezones':
            await self.middleware.call('system.general.timezone_choices'),
            'node_ip':
            await self.middleware.call('kubernetes.node_ip'),
            'certificates':
            await self.middleware.call('chart.release.certificate_choices'),
            'certificate_authorities':
            await self.middleware.call(
                'chart.release.certificate_authority_choices'),
            'system.general.config':
            await self.middleware.call('system.general.config'),
        }
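
A hedged usage sketch for the `catalog.items` job above. The catalog label 'OFFICIAL' and the `job=True` convenience flag of the websocket client are assumptions here, not something this listing defines.

from middlewared.client import Client

with Client() as c:
    trains = c.call('catalog.items', 'OFFICIAL', {
        'cache': True,
        'retrieve_all_trains': False,
        'trains': ['charts'],
    }, job=True)  # wait for the job to finish and return its result
    for item, details in trains.get('charts', {}).items():
        print(item, details.get('latest_version'))
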
Example #26
0
class SharingNFSService(CRUDService):
    class Config:
        namespace = "sharing.nfs"
        datastore = "sharing.nfs_share"
        datastore_prefix = "nfs_"
        datastore_extend = "sharing.nfs.extend"

    @accepts(
        Dict(
            "sharingnfs_create",
            List("paths", items=[Dir("path")]),
            Str("comment"),
            List("networks", items=[IPAddr("network", cidr=True)]),
            List("hosts", items=[Str("host")]),
            Bool("alldirs"),
            Bool("ro"),
            Bool("quiet"),
            Str("maproot_user", required=False, default=None),
            Str("maproot_group", required=False, default=None),
            Str("mapall_user", required=False, default=None),
            Str("mapall_group", required=False, default=None),
            List("security",
                 items=[
                     Str("provider", enum=["SYS", "KRB5", "KRB5I", "KRB5P"])
                 ]),
            register=True,
        ))
    async def do_create(self, data):
        verrors = ValidationErrors()

        await self.validate(data, "sharingnfs_create", verrors)

        if verrors:
            raise verrors

        await self.compress(data)
        paths = data.pop("paths")
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        for path in paths:
            await self.middleware.call(
                "datastore.insert",
                "sharing.nfs_share_path",
                {
                    "share_id": data["id"],
                    "path": path,
                },
            )
        await self.extend(data)

        await self.middleware.call("service.reload", "nfs")

        return data

    @accepts(Int("id"),
             Patch("sharingnfs_create", "sharingnfs_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        old = await self.middleware.call(
            "datastore.query",
            self._config.datastore,
            [("id", "=", id)],
            {
                "extend": self._config.datastore_extend,
                "prefix": self._config.datastore_prefix,
                "get": True
            },
        )

        new = old.copy()
        new.update(data)

        await self.validate(new, "sharingnfs_update", verrors, old=old)

        if verrors:
            raise verrors

        await self.compress(new)
        paths = new.pop("paths")
        await self.middleware.call("datastore.update", self._config.datastore,
                                   id, new,
                                   {"prefix": self._config.datastore_prefix})
        await self.middleware.call("datastore.delete",
                                   "sharing.nfs_share_path",
                                   [["share_id", "=", id]])
        for path in paths:
            await self.middleware.call(
                "datastore.insert",
                "sharing.nfs_share_path",
                {
                    "share_id": id,
                    "path": path,
                },
            )

        await self.extend(new)
        new["paths"] = paths

        await self.middleware.call("service.reload", "nfs")

        return new

    @accepts(Int("id"))
    async def do_delete(self, id):
        await self.middleware.call("datastore.delete",
                                   "sharing.nfs_share_path",
                                   [["share_id", "=", id]])
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        if not data["paths"]:
            verrors.add(f"{schema_name}.paths",
                        "At least one path is required")

        await self.middleware.run_in_io_thread(self.validate_paths, data,
                                               schema_name, verrors)

        filters = []
        if old:
            filters.append(["id", "!=", old["id"]])
        other_shares = await self.middleware.call("sharing.nfs.query", filters)
        dns_cache = await self.resolve_hostnames(
            sum([share["hosts"]
                 for share in other_shares], []) + data["hosts"])
        await self.middleware.run_in_io_thread(
            self.validate_hosts_and_networks, other_shares, data, schema_name,
            verrors, dns_cache)

        for k in ["maproot", "mapall"]:
            if not data[f"{k}_user"] and not data[f"{k}_group"]:
                pass
            elif not data[f"{k}_user"] and data[f"{k}_group"]:
                verrors.add(
                    f"{schema_name}.{k}_user",
                    "This field is required when map group is specified")
            else:
                user = await self.middleware.call(
                    "user.query", [("username", "=", data[f"{k}_user"])])
                if not user:
                    verrors.add(f"{schema_name}.{k}_user", "User not found")

                if data[f"{k}_group"]:
                    group = await self.middleware.call(
                        "group.query", [("group", "=", data[f"{k}_group"])])
                    if not group:
                        verrors.add(f"{schema_name}.{k}_group",
                                    "Group not found")

        if data["maproot_user"] and data["mapall_user"]:
            verrors.add(f"{schema_name}.mapall_user",
                        "maproot_user disqualifies mapall_user")

        if data["security"]:
            nfs_config = await self.middleware.call("nfs.config")
            if not nfs_config["v4"]:
                verrors.add(f"{schema_name}.security",
                            "This is not allowed when NFS v4 is disabled")

    @private
    def validate_paths(self, data, schema_name, verrors):
        dev = None
        is_mountpoint = False
        for i, path in enumerate(data["paths"]):
            stat = os.stat(path)
            if dev is None:
                dev = stat.st_dev
            else:
                if dev != stat.st_dev:
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "Paths for a NFS share must reside within the same filesystem"
                    )

            parent = os.path.abspath(os.path.join(path, ".."))
            if os.stat(parent).st_dev != dev:
                is_mountpoint = True
                if any(
                        os.path.abspath(p).startswith(parent + "/")
                        for p in data["paths"] if p != path):
                    verrors.add(
                        f"{schema_name}.paths.{i}",
                        "You cannot share a mount point and subdirectories all at once"
                    )

        if not is_mountpoint and data["alldirs"]:
            verrors.add(f"{schema_name}.alldirs",
                        "This option can only be used for datasets")

    @private
    async def resolve_hostnames(self, hostnames):
        hostnames = list(set(hostnames))

        async def resolve(hostname):
            try:
                return (await asyncio.wait_for(
                    self.middleware.run_in_io_thread(socket.getaddrinfo,
                                                     hostname, None),
                    5))[0][4][0]
            except Exception as e:
                self.logger.warning("Unable to resolve host %r: %r", hostname,
                                    e)
                return None

        resolved_hostnames = await asyncio_map(resolve, hostnames, 8)

        return dict(zip(hostnames, resolved_hostnames))

    @private
    def validate_hosts_and_networks(self, other_shares, data, schema_name,
                                    verrors, dns_cache):
        explanation = (
            ". This is so because /etc/exports does not act like ACL and it is undefined which rule among "
            "all overlapping networks will be applied.")

        dev = os.stat(data["paths"][0]).st_dev

        used_networks = set()
        for share in other_shares:
            try:
                share_dev = os.stat(share["paths"][0]).st_dev
            except Exception:
                self.logger.warning("Failed to stat first path for %r",
                                    share,
                                    exc_info=True)
                continue

            if share_dev == dev:
                for host in share["hosts"]:
                    host = dns_cache[host]
                    if host is None:
                        continue

                    try:
                        network = ipaddress.ip_network(host)
                    except Exception:
                        self.logger.warning("Got invalid host %r", host)
                        continue
                    else:
                        used_networks.add(network)

                for network in share["networks"]:
                    try:
                        network = ipaddress.ip_network(network, strict=False)
                    except Exception:
                        self.logger.warning("Got invalid network %r", network)
                        continue
                    else:
                        used_networks.add(network)

                if not share["hosts"] and not share["networks"]:
                    used_networks.add(ipaddress.ip_network("0.0.0.0/0"))
                    used_networks.add(ipaddress.ip_network("::/0"))

                if share["alldirs"] and data["alldirs"]:
                    verrors.add(
                        f"{schema_name}.alldirs",
                        "This option is only available once per mountpoint")

        had_explanation = False
        for i, host in enumerate(data["hosts"]):
            host = dns_cache[host]
            if host is None:
                verrors.add(f"{schema_name}.hosts.{i}",
                            "Unable to resolve host")
                continue

            network = ipaddress.ip_network(host)
            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.hosts.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}"
                        + ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        had_explanation = False
        for i, network in enumerate(data["networks"]):
            network = ipaddress.ip_network(network, strict=False)

            for another_network in used_networks:
                if network.overlaps(another_network):
                    verrors.add(f"{schema_name}.networks.{i}", (
                        f"You can't share same filesystem with overlapping networks {network} and {another_network}"
                        + ("" if had_explanation else explanation)))
                    had_explanation = True

            used_networks.add(network)

        if not data["hosts"] and not data["networks"]:
            if used_networks:
                verrors.add(
                    f"{schema_name}.networks",
                    (f"You can't share same filesystem with all hosts twice" +
                     ("" if had_explanation else explanation)))

    @private
    async def extend(self, data):
        data["paths"] = [
            path["path"] for path in await self.middleware.call(
                "datastore.query", "sharing.nfs_share_path",
                [["share_id", "=", data["id"]]])
        ]
        data["networks"] = data.pop("network").split()
        data["hosts"] = data["hosts"].split()
        data["security"] = [s.upper() for s in data["security"]]
        return data

    @private
    async def compress(self, data):
        data["network"] = " ".join(data.pop("networks"))
        data["hosts"] = " ".join(data["hosts"])
        data["security"] = [s.lower() for s in data["security"]]
        return data
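
A minimal, hedged sketch of creating a share through the service above; the path and network are placeholders, and the payload keys follow the `sharingnfs_create` schema.

from middlewared.client import Client

with Client() as c:
    share = c.call('sharing.nfs.create', {
        'paths': ['/mnt/tank/share'],
        'networks': ['192.168.0.0/24'],
        'security': ['SYS'],
        'comment': 'example share',
    })
    print(share['id'])
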
Example #27
0
File: zfs.py Project: junweilife/freenas
class ZFSPoolService(CRUDService):
    class Config:
        namespace = 'zfs.pool'
        private = True
        process_pool = True

    @filterable
    def query(self, filters, options):
        # We should not get datasets, there is zfs.dataset.query for that
        state_kwargs = {'datasets_recursive': False}
        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoid getting all pools
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                try:
                    pools = [
                        zfs.get(filters[0][2]).__getstate__(**state_kwargs)
                    ]
                except libzfs.ZFSException:
                    pools = []
            else:
                pools = [i.__getstate__(**state_kwargs) for i in zfs.pools]
        return filter_list(pools, filters, options)

    @accepts(
        Dict(
            'zfspool_create',
            Str('name', required=True),
            List('vdevs',
                 items=[
                     Dict(
                         'vdev',
                         Str('root',
                             enum=['DATA', 'CACHE', 'LOG', 'SPARE'],
                             required=True),
                         Str('type',
                             enum=[
                                 'RAIDZ1', 'RAIDZ2', 'RAIDZ3', 'MIRROR',
                                 'STRIPE'
                             ],
                             required=True),
                         List('devices', items=[Str('disk')], required=True),
                     ),
                 ],
                 required=True),
            Dict('options', additional_attrs=True),
            Dict('fsoptions', additional_attrs=True),
        ), )
    def do_create(self, data):
        with libzfs.ZFS() as zfs:
            topology = convert_topology(zfs, data['vdevs'])
            zfs.create(data['name'], topology, data['options'],
                       data['fsoptions'])

        return self.middleware.call_sync('zfs.pool._get_instance',
                                         data['name'])

    @accepts(Str('pool'),
             Dict(
                 'options',
                 Dict('properties', additional_attrs=True),
             ))
    def do_update(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                for k, v in options['properties'].items():
                    prop = pool.properties[k]
                    if 'value' in v:
                        prop.value = v['value']
                    elif 'parsed' in v:
                        prop.parsed = v['parsed']
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def do_delete(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                zfs.destroy(name, force=options['force'])
        except libzfs.ZFSException as e:
            errno_ = errno.EFAULT
            if e.code == libzfs.Error.UMOUNTFAILED:
                errno_ = errno.EBUSY
            raise CallError(str(e), errno_)

    @accepts(Str('pool', required=True))
    def upgrade(self, pool):
        try:
            with libzfs.ZFS() as zfs:
                zfs.get(pool).upgrade()
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'), Dict(
        'options',
        Bool('force', default=False),
    ))
    def export(self, name, options):
        try:
            with libzfs.ZFS() as zfs:
                # FIXME: force not yet implemented
                pool = zfs.get(name)
                zfs.export_pool(pool)
        except libzfs.ZFSException as e:
            raise CallError(str(e))

    @accepts(Str('pool'))
    def get_devices(self, name):
        try:
            with libzfs.ZFS() as zfs:
                return [i.replace('/dev/', '') for i in zfs.get(name).disks]
        except libzfs.ZFSException as e:
            raise CallError(str(e), errno.ENOENT)

    @accepts(Str('pool'))
    def get_disks(self, name):
        disks = self.get_devices(name)

        geom.scan()
        labelclass = geom.class_by_name('LABEL')
        for dev in disks:
            dev = dev.replace('.eli', '')
            find = labelclass.xml.findall(
                f".//provider[name='{dev}']/../consumer/provider")
            name = None
            if find:
                name = geom.provider_by_id(find[0].get('ref')).geom.name
            else:
                g = geom.geom_by_name('DEV', dev)
                if g:
                    name = g.consumer.provider.geom.name

            if name and (name.startswith('multipath/')
                         or geom.geom_by_name('DISK', name)):
                yield name
            else:
                self.logger.debug(f'Could not find disk for {dev}')

    @accepts(
        Str('name'),
        List('new', default=None, null=True),
        List('existing',
             items=[
                 Dict(
                     'attachvdev',
                     Str('target'),
                     Str('type', enum=['DISK']),
                     Str('path'),
                 ),
             ],
             null=True,
             default=None),
    )
    @job()
    def extend(self, job, name, new=None, existing=None):
        """
        Extend a zfs pool `name` with `new` vdevs or attach to `existing` vdevs.
        """

        if new is None and existing is None:
            raise CallError('New or existing vdevs must be provided',
                            errno.EINVAL)

        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)

                if new:
                    topology = convert_topology(zfs, new)
                    pool.attach_vdevs(topology)

                # Make sure we can find all target vdevs
                for i in (existing or []):
                    target = find_vdev(pool, i['target'])
                    if target is None:
                        raise CallError(
                            f"Failed to find vdev for {i['target']}",
                            errno.EINVAL)
                    i['target'] = target

                for i in (existing or []):
                    newvdev = libzfs.ZFSVdev(zfs, i['type'].lower())
                    newvdev.path = i['path']
                    i['target'].attach(newvdev)

        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    def __zfs_vdev_operation(self, name, label, op, *args):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)
                op(target, *args)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('pool'), Str('label'))
    def detach(self, name, label):
        """
        Detach device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label, lambda target: target.detach())

    @accepts(Str('pool'), Str('label'))
    def offline(self, name, label):
        """
        Offline device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label, lambda target: target.offline())

    @accepts(Str('pool'), Str('label'), Bool('expand', default=False))
    def online(self, name, label, expand=False):
        """
        Online device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label,
                                  lambda target, *args: target.online(*args),
                                  expand)

    @accepts(Str('pool'), Str('label'))
    def remove(self, name, label):
        """
        Remove device `label` from the pool `pool`.
        """
        self.__zfs_vdev_operation(name, label, lambda target: target.remove())

    @accepts(Str('pool'), Str('label'), Str('dev'))
    def replace(self, name, label, dev):
        """
        Replace device `label` with `dev` in pool `name`.
        """
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                target = find_vdev(pool, label)
                if target is None:
                    raise CallError(f'Failed to find vdev for {label}',
                                    errno.EINVAL)

                newvdev = libzfs.ZFSVdev(zfs, 'disk')
                newvdev.path = f'/dev/{dev}'
                # FIXME: Replace using old path is not working for some reason
                # Lets use guid for now.
                target.path = str(target.guid)
                target.replace(newvdev)
        except libzfs.ZFSException as e:
            raise CallError(str(e), e.code)

    @accepts(Str('name', required=True),
             Str('action', enum=['START', 'STOP', 'PAUSE'], default='START'))
    @job(lock=lambda i: f'{i[0]}-{i[1] if len(i) >= 2 else "START"}')
    def scrub(self, job, name, action=None):
        """
        Start/Stop/Pause a scrub on pool `name`.
        """
        if action != 'PAUSE':
            try:
                with libzfs.ZFS() as zfs:
                    pool = zfs.get(name)

                    if action == 'START':
                        pool.start_scrub()
                    else:
                        pool.stop_scrub()
            except libzfs.ZFSException as e:
                raise CallError(str(e), e.code)
        else:
            proc = subprocess.Popen(f'zpool scrub -p {name}'.split(' '),
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()

            if proc.returncode != 0:
                raise CallError('Unable to pause scrubbing')

        def watch():
            while True:
                with libzfs.ZFS() as zfs:
                    scrub = zfs.get(name).scrub.__getstate__()

                if scrub['pause']:
                    job.set_progress(100, 'Scrub paused')
                    break

                if scrub['function'] != 'SCRUB':
                    break

                if scrub['state'] == 'FINISHED':
                    job.set_progress(100, 'Scrub finished')
                    break

                if scrub['state'] == 'CANCELED':
                    break

                if scrub['state'] == 'SCANNING':
                    job.set_progress(scrub['percentage'], 'Scrubbing')
                time.sleep(1)

        if action == 'START':
            t = threading.Thread(target=watch, daemon=True)
            t.start()
            t.join()

    @accepts()
    def find_import(self):
        with libzfs.ZFS() as zfs:
            return [i.__getstate__() for i in zfs.find_import()]

    @accepts(
        Str('name_or_guid'),
        Dict('options', additional_attrs=True),
        Bool('any_host', default=True),
        Str('cachefile', null=True, default=None),
    )
    def import_pool(self, name_or_guid, options, any_host, cachefile):
        found = False
        with libzfs.ZFS() as zfs:
            for pool in zfs.find_import(cachefile=cachefile):
                if pool.name == name_or_guid or str(pool.guid) == name_or_guid:
                    found = pool
                    break

            if not found:
                raise CallError(f'Pool {name_or_guid} not found.',
                                errno.ENOENT)

            zfs.import_pool(found, found.name, options, any_host=any_host)

    @accepts(Str('pool'))
    async def find_not_online(self, pool):
        pool = await self.middleware.call('zfs.pool.query',
                                          [['id', '=', pool]], {'get': True})

        unavails = []
        for nodes in pool['groups'].values():
            for node in nodes:
                unavails.extend(self.__find_not_online(node))
        return unavails

    def __find_not_online(self, node):
        if len(node['children']) == 0 and node['status'] not in ('ONLINE',
                                                                 'AVAIL'):
            return [node]

        unavails = []
        for child in node['children']:
            unavails.extend(self.__find_not_online(child))
        return unavails

    def get_vdev(self, name, vname):
        try:
            with libzfs.ZFS() as zfs:
                pool = zfs.get(name)
                vdev = find_vdev(pool, vname)
                if not vdev:
                    raise CallError(f'{vname} not found in {name}',
                                    errno.ENOENT)
                return vdev.__getstate__()
        except libzfs.ZFSException as e:
            raise CallError(str(e))
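
As with Example #22, this service is private, so calls typically come from other middleware plugins. Below is a hedged sketch of driving the vdev helpers above; the pool, label and device names are assumptions, and whether an explicit offline is wanted before a replace depends on the caller.

async def replace_disk(middleware, pool='tank',
                       label='gptid/deadbeef', new_dev='da5'):
    # Take the old vdev offline, then replace it with the new device.
    await middleware.call('zfs.pool.offline', pool, label)
    await middleware.call('zfs.pool.replace', pool, label, new_dev)
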
Example #28
0
class DatastoreService(Service):
    def _filters_to_queryset(self, filters, field_prefix=None):
        opmap = {
            '=': 'exact',
            '!=': 'exact',
            '>': 'gt',
            '>=': 'gte',
            '<': 'lt',
            '<=': 'lte',
            '~': 'regex',
        }

        rv = []
        for f in filters:
            if not isinstance(f, (list, tuple)):
                raise ValueError('Filter must be a list: {0}'.format(f))
            if len(f) == 3:
                name, op, value = f
                if field_prefix:
                    name = field_prefix + name
                if op not in opmap:
                    raise Exception("Invalid operation: {0}".format(op))
                q = Q(**{'{0}__{1}'.format(name, opmap[op]): value})
                if op == '!=':
                    q.negate()
                rv.append(q)
            elif len(f) == 2:
                op, value = f
                if op == 'OR':
                    or_value = None
                    for value in self._filters_to_queryset(
                            value, field_prefix=field_prefix):
                        if or_value is None:
                            or_value = value
                        else:
                            or_value |= value
                    rv.append(or_value)
                else:
                    raise ValueError('Invalid operation: {0}'.format(op))
            else:
                raise Exception("Invalid filter {0}".format(f))
        return rv

    def __get_model(self, name):
        """Helper method to get Model for given name
        e.g. network.interfaces -> Interfaces
        """
        app, model = name.split('.', 1)
        return apps.get_model(app, model)

    def __queryset_serialize(self, qs, extend=None, field_prefix=None):
        for i in self.middleware.threaded(lambda: list(qs)):
            yield django_modelobj_serialize(self.middleware,
                                            i,
                                            extend=extend,
                                            field_prefix=field_prefix)

    @accepts(
        Str('name'),
        List('query-filters', register=True),
        Dict(
            'query-options',
            Str('extend'),
            Dict('extra', additional_attrs=True),
            List('order_by'),
            Bool('count'),
            Bool('get'),
            Str('prefix'),
            register=True,
        ),
    )
    def query(self, name, filters=None, options=None):
        """Query for items in a given collection `name`.

        `filters` is a list where each entry can be in one of the following formats:

            entry: simple_filter | conjunction
            simple_filter: '[' attribute_name, OPERATOR, value ']'
            conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'

            OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' )
            CONJUNCTION: 'OR'

        e.g.

        `['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`

        `[ ['username', '=', 'root' ] ]`

        .. examples(websocket)::

          Querying for username "root" and returning a single item:

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "datastore.query",
              "params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
            }
        """
        model = self.__get_model(name)
        if options is None:
            options = {}
        else:
            # We do not want to make changes to original options
            # which might happen with "prefix"
            options = options.copy()

        qs = model.objects.all()

        extra = options.get('extra')
        if extra:
            qs = qs.extra(**extra)

        prefix = options.get('prefix')

        if filters:
            qs = qs.filter(*self._filters_to_queryset(filters, prefix))

        order_by = options.get('order_by')
        if order_by:
            if prefix:
                # Do not change original order_by
                order_by = order_by[:]
                for i, order in enumerate(order_by):
                    if order.startswith('-'):
                        order_by[i] = '-' + prefix + order[1:]
                    else:
                        order_by[i] = prefix + order
            qs = qs.order_by(*order_by)

        if options.get('count') is True:
            return qs.count()

        result = list(
            self.__queryset_serialize(qs,
                                      extend=options.get('extend'),
                                      field_prefix=options.get('prefix')))

        if options.get('get') is True:
            return result[0]
        return result

    @accepts(Str('name'), Ref('query-options'))
    def config(self, name, options=None):
        """
        Get configuration settings object for a given `name`.

        This is a shortcut for `query(name, {"get": true})`.
        """
        if options is None:
            options = {}
        options['get'] = True
        return self.query(name, None, options)

    @accepts(Str('name'), Dict('data', additional_attrs=True))
    def insert(self, name, data):
        """
        Insert a new entry to `name`.
        """
        model = self.__get_model(name)
        for field in model._meta.fields:
            if field.name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[field.name] = field.rel.to.objects.get(
                    pk=data[field.name])
        obj = model(**data)
        obj.save()
        return obj.pk

    @accepts(Str('name'), Int('id'), Dict('data', additional_attrs=True))
    def update(self, name, id, data):
        """
        Update an entry `id` in `name`.
        """
        model = self.__get_model(name)
        obj = model.objects.get(pk=id)
        for field in model._meta.fields:
            if field.name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[field.name] = field.rel.to.objects.get(
                    pk=data[field.name])
        for k, v in list(data.items()):
            setattr(obj, k, v)
        obj.save()
        return obj.pk

    @accepts(Str('name'), Int('id'))
    def delete(self, name, id):
        """
        Delete an entry `id` in `name`.
        """
        model = self.__get_model(name)
        model.objects.get(pk=id).delete()
        return True
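
    # Illustrative CRUD round-trip (hedged: `c` is a hypothetical client handle
    # and the collection/field names are examples only):
    #   pk = c.call('datastore.insert', 'services.ftp', {'ftp_port': 21})
    #   c.call('datastore.update', 'services.ftp', pk, {'ftp_port': 2121})
    #   c.call('datastore.delete', 'services.ftp', pk)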

    @private
    def sql(self, query, params=None):
        cursor = connection.cursor()
        rv = None
        try:
            if params is None:
                cursor.executelocal(query)
            else:
                cursor.executelocal(query, params)
            rv = cursor.fetchall()
        finally:
            cursor.close()
        return rv
Example #29
class ChartReleaseService(CRUDService):
    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'chart.release'
        cli_namespace = 'app.chart_release'

    ENTRY = Dict(
        'chart_release_entry',
        Str('name', required=True),
        Dict('info', additional_attrs=True),
        Dict('config', additional_attrs=True),
        List('hooks'),
        Int('version', required=True, description='Version of chart release'),
        Str('namespace', required=True),
        Dict(
            'chart_metadata',
            Str('name', required=True, description='Name of application'),
            Str('version', required=True,
                description='Version of application'),
            Str('latest_chart_version',
                required=True,
                description='Latest available version of application'),
            additional_attrs=True,
        ),
        Str('id', required=True),
        Str('catalog', required=True),
        Str('catalog_train', required=True),
        Str('path', required=True),
        Str('dataset', required=True),
        Str('status', required=True),
        List('used_ports',
             items=[
                 Dict(
                     'port',
                     Int('port', required=True),
                     Str('protocol', required=True),
                 )
             ],
             required=True),
        Dict(
            'pod_status',
            Int('available', required=True),
            Int('desired', required=True),
            required=True,
        ),
        Bool('update_available', required=True),
        Str('human_version',
            required=True,
            description='Human friendly version identifier for chart release'),
        Str('human_latest_version',
            required=True,
            description=
            'Human friendly latest available version identifier for chart release'
            ),
        Bool(
            'container_images_update_available',
            required=True,
            description=
            'Will be set when any image(s) being used in the chart release has a newer version available'
        ),
        Dict('portals', additional_attrs=True),
        Dict('chart_schema', null=True, additional_attrs=True),
        Dict('history', additional_attrs=True),
        Dict(
            'resources',
            Dict('storage_class', additional_attrs=True),
            List('persistent_volumes'),
            List('host_path_volumes'),
            List('locked_host_paths'),
            Dict('container_images', additional_attrs=True),
            List('truenas_certificates', items=[Int('certificate_id')]),
            List('truenas_certificate_authorities',
                 items=[Int('certificate_authority_id')]),
            *[List(r.value) for r in Resources],
        ),
    )

    @filterable
    async def query(self, filters, options):
        """
        Query available chart releases.

        `query-options.extra.retrieve_resources` is a boolean which, when set, will retrieve existing kubernetes
        resources in the chart namespace.

        `query-options.extra.history` is a boolean which, when set, will retrieve all chart version upgrades
        for a chart release.

        `query-options.extra.include_chart_schema` is a boolean which, when set, will retrieve the schema being used
        by the chart release in question.

        `query-options.extra.resource_events` is a boolean which, when set, will retrieve individual events of each
        resource. This only has effect if `query-options.extra.retrieve_resources` is set.
        """
        if not await self.middleware.call('kubernetes.validate_k8s_setup',
                                          False):
            # We use filter_list here to ensure that `options` (e.g. `get: true`) are respected
            return filter_list([], filters, options)

        k8s_config = await self.middleware.call('kubernetes.config')
        update_catalog_config = {}
        catalogs = await self.middleware.call(
            'catalog.query', [], {'extra': {
                'item_details': True
            }})
        container_images = {}
        for image in await self.middleware.call('container.image.query'):
            for tag in image['repo_tags']:
                if not container_images.get(tag):
                    container_images[tag] = image

        for catalog in catalogs:
            update_catalog_config[catalog['label']] = {}
            for train in catalog['trains']:
                train_data = {}
                for catalog_item in catalog['trains'][train]:
                    max_version = catalog['trains'][train][catalog_item][
                        'latest_version'] or '0.0.0'
                    app_version = catalog['trains'][train][catalog_item][
                        'latest_app_version'] or '0.0.0'
                    train_data[catalog_item] = {
                        'chart_version': parse_version(max_version),
                        'app_version': app_version,
                    }

                update_catalog_config[catalog['label']][train] = train_data

        k8s_node_ip = await self.middleware.call('kubernetes.node_ip')
        options = options or {}
        extra = copy.deepcopy(options.get('extra', {}))
        retrieve_schema = extra.get('include_chart_schema')
        get_resources = extra.get('retrieve_resources')
        get_locked_paths = extra.get('retrieve_locked_paths')
        locked_datasets = await self.middleware.call(
            'zfs.dataset.locked_datasets') if get_locked_paths else []
        get_history = extra.get('history')
        if retrieve_schema:
            questions_context = await self.middleware.call(
                'catalog.get_normalised_questions_context')
        else:
            questions_context = None

        if filters and len(filters) == 1 and filters[0][:2] == ['id', '=']:
            extra['namespace_filter'] = [
                'metadata.namespace', '=',
                f'{CHART_NAMESPACE_PREFIX}{filters[0][-1]}'
            ]
            resources_filters = [extra['namespace_filter']]
        else:
            resources_filters = [[
                'metadata.namespace', '^', CHART_NAMESPACE_PREFIX
            ]]

        ports_used = collections.defaultdict(list)
        for node_port_svc in await self.middleware.call(
                'k8s.service.query',
            [['spec.type', '=', 'NodePort']] + resources_filters):
            release_name = node_port_svc['metadata']['namespace'][
                len(CHART_NAMESPACE_PREFIX):]
            ports_used[release_name].extend([{
                'port': p['node_port'],
                'protocol': p['protocol']
            } for p in node_port_svc['spec']['ports']])

        if get_resources:
            storage_mapping = await self.middleware.call(
                'chart.release.get_workload_storage_details')

        resources_mapping = await self.middleware.call(
            'chart.release.get_resources_with_workload_mapping', {
                'resource_events':
                extra.get('resource_events', False),
                'resource_filters':
                resources_filters,
                'resources': [
                    r.name for r in (Resources if get_resources else [
                        Resources.POD, Resources.DEPLOYMENT, Resources.
                        STATEFULSET
                    ])
                ],
            })
        resources = resources_mapping['resources']

        release_secrets = await self.middleware.call(
            'chart.release.releases_secrets', extra)
        releases = []
        for name, release in release_secrets.items():
            config = {}
            release_data = release['releases'].pop(0)
            cur_version = release_data['chart_metadata']['version']

            for rel_data in filter(
                    lambda r: r['chart_metadata']['version'] == cur_version,
                    itertools.chain(reversed(release['releases']),
                                    [release_data])):
                config.update(rel_data['config'])

            pods_status = resources_mapping['workload_status'][name]
            pod_diff = pods_status['available'] - pods_status['desired']
            status = 'ACTIVE'
            if pod_diff == 0 and pods_status['desired'] == 0:
                status = 'STOPPED'
            elif pod_diff < 0:
                status = 'DEPLOYING'

            # We will retrieve all host ports being used
            for pod in filter(lambda p: p['status']['phase'] == 'Running',
                              resources[Resources.POD.value][name]):
                for container in pod['spec']['containers']:
                    ports_used[name].extend([{
                        'port': p['host_port'],
                        'protocol': p['protocol']
                    } for p in (container['ports'] or []) if p['host_port']])

            release_data.update({
                'path':
                os.path.join('/mnt', k8s_config['dataset'], 'releases', name),
                'dataset':
                os.path.join(k8s_config['dataset'], 'releases', name),
                'config':
                config,
                'status':
                status,
                'used_ports':
                ports_used[name],
                'pod_status':
                pods_status,
            })

            container_images_normalized = {
                i_name: {
                    'id':
                    image_details.get('id'),
                    'update_available':
                    image_details.get('update_available', False)
                }
                for i_name, image_details in map(
                    lambda i: (i, container_images.get(i, {})),
                    list(
                        set(c['image'] for workload_type in ('deployments',
                                                             'statefulsets')
                            for workload in resources[workload_type][name]
                            for c in workload['spec']['template']['spec']
                            ['containers'])))
            }
            if get_resources:
                release_resources = {
                    'storage_class':
                    storage_mapping['storage_classes'][get_storage_class_name(
                        name)],
                    'persistent_volumes':
                    storage_mapping['persistent_volumes'][name],
                    'host_path_volumes':
                    await self.host_path_volumes(
                        itertools.chain(*[
                            resources[getattr(Resources, k).value][name]
                            for k in ('DEPLOYMENT', 'STATEFULSET')
                        ])),
                    **{r.value: resources[r.value][name]
                       for r in Resources},
                    'container_images':
                    container_images_normalized,
                    'truenas_certificates': [
                        v['id'] for v in release_data['config'].get(
                            'ixCertificates', {}).values()
                    ],
                    'truenas_certificate_authorities': [
                        v['id'] for v in release_data['config'].get(
                            'ixCertificateAuthorities', {}).values()
                    ],
                }
                if get_locked_paths:
                    release_resources['locked_host_paths'] = [
                        v['host_path']['path']
                        for v in release_resources['host_path_volumes']
                        if await self.middleware.call(
                            'pool.dataset.path_in_locked_datasets',
                            v['host_path']['path'], locked_datasets)
                    ]

                release_data['resources'] = release_resources

            if get_history:
                release_data['history'] = release['history']
                for k, v in release_data['history'].items():
                    r_app_version = self.normalize_app_version_of_chart_release(
                        v)
                    release_data['history'][k].update({
                        'human_version':
                        f'{r_app_version}_{parse_version(v["chart_metadata"]["version"])}',
                    })

            current_version = parse_version(
                release_data['chart_metadata']['version'])
            catalog_version_dict = update_catalog_config.get(
                release_data['catalog'],
                {}).get(release_data['catalog_train'],
                        {}).get(release_data['chart_metadata']['name'], {})
            latest_version = catalog_version_dict.get('chart_version',
                                                      current_version)
            latest_app_version = catalog_version_dict.get('app_version')
            release_data['update_available'] = latest_version > current_version

            app_version = self.normalize_app_version_of_chart_release(
                release_data)
            if release_data['chart_metadata']['name'] == 'ix-chart':
                # Latest app version for ix-chart remains the same
                latest_app_version = app_version

            for key, app_v, c_v in (
                ('human_version', app_version, current_version),
                ('human_latest_version', latest_app_version, latest_version),
            ):
                if app_v:
                    release_data[key] = f'{app_v}_{c_v}'
                else:
                    release_data[key] = str(c_v)

            if retrieve_schema:
                chart_path = os.path.join(
                    release_data['path'], 'charts',
                    release_data['chart_metadata']['version'])
                if os.path.exists(chart_path):
                    release_data['chart_schema'] = await self.middleware.call(
                        'catalog.item_version_details', chart_path,
                        questions_context)
                else:
                    release_data['chart_schema'] = None

            release_data['container_images_update_available'] = any(
                details['update_available']
                for details in container_images_normalized.values())
            release_data['chart_metadata']['latest_chart_version'] = str(
                latest_version)
            release_data['portals'] = await self.middleware.call(
                'chart.release.retrieve_portals_for_chart_release',
                release_data, k8s_node_ip)

            if 'icon' not in release_data['chart_metadata']:
                release_data['chart_metadata']['icon'] = None

            releases.append(release_data)

        return filter_list(releases, filters, options)

    @private
    def normalize_app_version_of_chart_release(self, release_data):
        app_version = None
        if release_data['chart_metadata']['name'] == 'ix-chart':
            image_config = release_data['config'].get('image') or {}
            if all(k in image_config for k in ('tag', 'repository')):
                # TODO: Let's see if we can find sane versioning for `latest` from upstream
                if image_config['tag'] == 'latest':
                    app_version = f'{image_config["repository"]}:{image_config["tag"]}'
                else:
                    app_version = image_config['tag']
        else:
            app_version = release_data['chart_metadata'].get('appVersion')
        return app_version

    @private
    async def host_path_volumes(self, resources):
        host_path_volumes = []
        for resource in resources:
            for volume in filter(
                    lambda v: (v.get('host_path') or {}).get('path'),
                    resource['spec']['template']['spec']['volumes'] or []):
                host_path_volumes.append(copy.deepcopy(volume))
        return host_path_volumes

    @private
    async def normalise_and_validate_values(self,
                                            item_details,
                                            values,
                                            update,
                                            release_ds,
                                            release_data=None):
        dict_obj = await self.middleware.call(
            'chart.release.validate_values',
            item_details,
            values,
            update,
            release_data,
        )
        return await self.middleware.call(
            'chart.release.get_normalised_values', dict_obj, values, update, {
                'release': {
                    'name': release_ds.split('/')[-1],
                    'dataset': release_ds,
                    'path': os.path.join('/mnt', release_ds),
                },
                'actions': [],
            })

    @private
    async def perform_actions(self, context):
        for action in context['actions']:
            await self.middleware.call(f'chart.release.{action["method"]}',
                                       *action['args'])

    @accepts(
        Dict(
            'chart_release_create',
            Dict('values', additional_attrs=True),
            Str('catalog', required=True),
            Str('item', required=True),
            Str('release_name',
                required=True,
                validators=[Match(r'^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')]),
            Str('train', default='charts'),
            Str('version', default='latest'),
        ))
    @job(lock=lambda args: f'chart_release_create_{args[0]["release_name"]}')
    async def do_create(self, job, data):
        """
        Create a chart release for a catalog item.

        `release_name` is the name which will be used to identify the created chart release.

        `catalog` is a valid catalog id under which the system will look for catalog `item` details.

        `train` is the train to look for under `catalog`, i.e. stable / testing, etc.

        `version` specifies the catalog `item` version.

        `values` is the configuration specified for the catalog item version in question, which will be used to
        create the chart release.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        if await self.query([['id', '=', data['release_name']]]):
            raise CallError(
                f'Chart release with {data["release_name"]} already exists.',
                errno=errno.EEXIST)

        catalog = await self.middleware.call('catalog.get_instance',
                                             data['catalog'])
        item_details = await self.middleware.call(
            'catalog.get_item_details', data['item'], {
                'catalog': data['catalog'],
                'train': data['train'],
            })
        version = data['version']
        if version == 'latest':
            version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions',
                item_details['versions'])

        if version not in item_details['versions']:
            raise CallError(
                f'Unable to locate "{data["version"]}" catalog item version.',
                errno=errno.ENOENT)

        item_details = item_details['versions'][version]
        await self.middleware.call('catalog.version_supported_error_check',
                                   item_details)

        k8s_config = await self.middleware.call('kubernetes.config')
        release_ds = os.path.join(k8s_config['dataset'], 'releases',
                                  data['release_name'])
        # The idea is to validate the provided values first and, if they pass our validation, we
        # can move forward with setting up the datasets and installing the catalog item
        new_values = data['values']
        new_values, context = await self.normalise_and_validate_values(
            item_details, new_values, False, release_ds)

        job.set_progress(25, 'Initial Validation completed')

        # Now that we have completed validation for the item in question wrt values provided,
        # we will now perform the following steps
        # 1) Create release datasets
        # 2) Copy chart version into release/charts dataset
        # 3) Install the helm chart
        # 4) Create storage class
        try:
            job.set_progress(30, 'Creating chart release datasets')

            for dataset in await self.release_datasets(release_ds):
                if not await self.middleware.call('zfs.dataset.query',
                                                  [['id', '=', dataset]]):
                    await self.middleware.call('zfs.dataset.create', {
                        'name': dataset,
                        'type': 'FILESYSTEM'
                    })
                    await self.middleware.call('zfs.dataset.mount', dataset)

            job.set_progress(45, 'Created chart release datasets')

            chart_path = os.path.join('/mnt', release_ds, 'charts', version)
            await self.middleware.run_in_thread(
                lambda: shutil.copytree(item_details['location'], chart_path))

            job.set_progress(55, 'Completed setting up chart release')
            # Before finally installing the release, we will perform any actions which might be required
            # for the release to function like creating/deleting ix-volumes
            await self.perform_actions(context)

            namespace_name = get_namespace(data['release_name'])

            job.set_progress(65,
                             f'Creating {namespace_name} for chart release')
            namespace_body = {
                'metadata': {
                    'labels': {
                        'catalog': data['catalog'],
                        'catalog_train': data['train'],
                        'catalog_branch': catalog['branch'],
                    },
                    'name': namespace_name,
                }
            }
            if not await self.middleware.call(
                    'k8s.namespace.query',
                [['metadata.name', '=', namespace_name]]):
                await self.middleware.call('k8s.namespace.create',
                                           {'body': namespace_body})
            else:
                await self.middleware.call('k8s.namespace.update',
                                           namespace_name,
                                           {'body': namespace_body})

            job.set_progress(75, 'Installing Catalog Item')

            new_values = await add_context_to_configuration(
                new_values, {
                    CONTEXT_KEY_NAME: {
                        **get_action_context(data['release_name']),
                        'operation': 'INSTALL',
                        'isInstall': True,
                    }
                }, self.middleware)

            await self.middleware.call(
                'chart.release.create_update_storage_class_for_chart_release',
                data['release_name'], os.path.join(release_ds, 'volumes'))

            # We will install the chart now and force the installation in an ix based namespace
            # https://github.com/helm/helm/issues/5465#issuecomment-473942223
            await self.middleware.call('chart.release.helm_action',
                                       data['release_name'], chart_path,
                                       new_values, 'install')
        except Exception:
            # Roll back: uninstall the release as well if it did get installed
            # (it is possible this might have happened).
            if await self.middleware.call('chart.release.query',
                                          [['id', '=', data['release_name']]]):
                delete_job = await self.middleware.call(
                    'chart.release.delete', data['release_name'])
                await delete_job.wait()
                if delete_job.error:
                    self.logger.error(
                        'Failed to uninstall helm chart release: %s',
                        delete_job.error)
            else:
                await self.post_remove_tasks(data['release_name'])

            raise
        else:
            await self.middleware.call('chart.release.refresh_events_state',
                                       data['release_name'])
            job.set_progress(100, 'Chart release created')
            return await self.get_instance(data['release_name'])

    @accepts(Str('chart_release'),
             Dict(
                 'chart_release_update',
                 Dict('values', additional_attrs=True),
             ))
    @job(lock=lambda args: f'chart_release_update_{args[0]}')
    async def do_update(self, job, chart_release, data):
        """
        Update an existing chart release.

        `values` is the configuration specified for the catalog item version in question, which will be used to
        update the chart release.
        """
        release = await self.get_instance(chart_release)
        release_orig = copy.deepcopy(release)
        chart_path = os.path.join(release['path'], 'charts',
                                  release['chart_metadata']['version'])
        if not os.path.exists(chart_path):
            raise CallError(
                f'Unable to locate {chart_path!r} chart version for updating {chart_release!r} chart release',
                errno=errno.ENOENT)

        version_details = await self.middleware.call(
            'catalog.item_version_details', chart_path)
        config = release['config']
        config.update(data['values'])
        # We use update=False because we want defaults to be populated again if they are not present in the payload.
        # This is safe because the defaults are only added if they are not already present/configured for
        # the chart release.
        config, context = await self.normalise_and_validate_values(
            version_details,
            config,
            False,
            release['dataset'],
            release_orig,
        )

        job.set_progress(25, 'Initial Validation complete')

        await self.perform_actions(context)

        config = await add_context_to_configuration(
            config, {
                CONTEXT_KEY_NAME: {
                    **get_action_context(chart_release),
                    'operation': 'UPDATE',
                    'isUpdate': True,
                }
            }, self.middleware)

        await self.middleware.call('chart.release.helm_action', chart_release,
                                   chart_path, config, 'update')

        job.set_progress(90, 'Syncing secrets for chart release')
        await self.middleware.call('chart.release.sync_secrets_for_release',
                                   chart_release)
        await self.middleware.call('chart.release.refresh_events_state',
                                   chart_release)

        job.set_progress(100, 'Update completed for chart release')
        return await self.get_instance(chart_release)

    @accepts(Str('release_name'),
             Dict(
                 'options',
                 Bool('delete_unused_images', default=False),
             ))
    @job(lock=lambda args: f'chart_release_delete_{args[0]}')
    async def do_delete(self, job, release_name, options):
        """
        Delete existing chart release.

        This will delete the chart release from the kubernetes cluster and also remove any associated volumes / data.
        To clarify, host path volumes that live outside the chart release dataset will not be deleted.
        """
        # For delete we will uninstall the release first and then remove the associated datasets
        await self.middleware.call('kubernetes.validate_k8s_setup')
        chart_release = await self.get_instance(
            release_name, {'extra': {
                'retrieve_resources': True
            }})

        cp = await run([
            'helm', 'uninstall', release_name, '-n',
            get_namespace(release_name)
        ],
                       check=False)
        if cp.returncode:
            raise CallError(
                f'Unable to uninstall "{release_name}" chart release: {cp.stderr}'
            )

        job.set_progress(50, f'Uninstalled {release_name}')
        job.set_progress(75, f'Waiting for {release_name!r} pods to terminate')
        await self.middleware.call('chart.release.wait_for_pods_to_terminate',
                                   get_namespace(release_name))

        await self.post_remove_tasks(release_name, job)

        await self.middleware.call(
            'chart.release.remove_chart_release_from_events_state',
            release_name)
        await self.middleware.call(
            'chart.release.clear_chart_release_portal_cache', release_name)
        await self.middleware.call('alert.oneshot_delete',
                                   'ChartReleaseUpdate', release_name)
        if options['delete_unused_images']:
            job.set_progress(97, 'Deleting unused container images')
            failed = await self.middleware.call(
                'chart.release.delete_unused_app_images', chart_release)
            if failed:
                msg = '\n'
                # Number the failed image deletions for the error message.
                for i, v in enumerate(failed.items()):
                    msg += f'{i + 1}) {v[0]} ({v[1]})\n'
                raise CallError(
                    f'{release_name!r} was deleted but unable to delete following images:{msg}'
                )

        job.set_progress(100, f'{release_name!r} chart release deleted')
        return True

    @private
    async def post_remove_tasks(self, release_name, job=None):
        await self.remove_storage_class_and_dataset(release_name, job)
        await self.middleware.call('k8s.namespace.delete',
                                   get_namespace(release_name))

    @private
    async def remove_storage_class_and_dataset(self, release_name, job=None):
        storage_class_name = get_storage_class_name(release_name)
        if await self.middleware.call(
                'k8s.storage_class.query',
            [['metadata.name', '=', storage_class_name]]):
            if job:
                job.set_progress(85,
                                 f'Removing {release_name!r} storage class')
            try:
                await self.middleware.call('k8s.storage_class.delete',
                                           storage_class_name)
            except Exception as e:
                self.logger.error('Failed to remove %r storage class: %s',
                                  storage_class_name, e)

        k8s_config = await self.middleware.call('kubernetes.config')
        release_ds = os.path.join(k8s_config['dataset'], 'releases',
                                  release_name)

        # If the chart release was consuming any PVs, they have to be removed manually from the k8s database
        # because the chart release reclaim policy is "retain"
        pvc_volume_ds = os.path.join(release_ds, 'volumes')
        for pv in await self.middleware.call('k8s.pv.query', [[
                'spec.csi.volume_attributes.openebs\\.io/poolname', '=',
                pvc_volume_ds
        ]]):
            await self.middleware.call('k8s.pv.delete', pv['metadata']['name'])

        failed_zfs_volumes = []
        # We would like to delete openebs zfs volume objects (not actual zfs volumes) in the openebs namespace
        for zfs_volume in await self.middleware.call(
                'k8s.zv.query', [['spec.poolName', '=', pvc_volume_ds]]):
            try:
                await self.middleware.call('k8s.zv.delete',
                                           zfs_volume['metadata']['name'])
            except Exception:
                # It's perfectly fine if this fails, as functionality-wise this change is just cosmetic
                # and essentially cleans up leftover zfs volume entries from the k8s db
                failed_zfs_volumes.append(zfs_volume['metadata']['name'])

        if failed_zfs_volumes:
            self.logger.error(
                'Failed to delete %r zfs volumes when deleting %r chart release',
                ', '.join(failed_zfs_volumes), release_name)

        if await self.middleware.call('zfs.dataset.query',
                                      [['id', '=', release_ds]]):
            if job:
                job.set_progress(95, f'Removing {release_ds!r} dataset')
            await self.middleware.call('zfs.dataset.delete', release_ds, {
                'recursive': True,
                'force': True
            })

    @private
    async def release_datasets(self, release_dataset):
        return [release_dataset] + [
            os.path.join(release_dataset, k)
            for k in ('charts', 'volumes', 'volumes/ix_volumes')
        ]

    @private
    async def get_chart_namespace_prefix(self):
        return CHART_NAMESPACE_PREFIX
Example #30
class PoolService(CRUDService):

    GELI_KEYPATH = '/data/geli'

    class Config:
        datastore = 'storage.volume'
        datastore_extend = 'pool.pool_extend'
        datastore_prefix = 'vol_'

    @accepts()
    async def filesystem_choices(self):
        vol_names = [vol['name'] for vol in (await self.query())]
        return [
            y['name']
            for y in await self.middleware.call('zfs.dataset.query', [(
                'name', 'rnin', '.system'), ('pool', 'in', vol_names)])
        ]

    def _topology(self, x, geom_scan=True):
        """
        Transform topology output from libzfs to add `device` and make `type` uppercase.
        """
        if isinstance(x, dict):
            path = x.get('path')
            if path is not None:
                device = None
                if path.startswith('/dev/'):
                    device = self.middleware.call_sync('disk.label_to_dev',
                                                       path[5:], geom_scan)
                x['device'] = device
            for key in x:
                if key == 'type' and isinstance(x[key], str):
                    x[key] = x[key].upper()
                else:
                    x[key] = self._topology(x[key], False)
        elif isinstance(x, list):
            for i, entry in enumerate(x):
                x[i] = self._topology(x[i], False)
        return x

    @private
    def pool_extend(self, pool):
        """
        If the pool is encrypted, we need to check whether the pool is imported
        or whether all geli providers exist.
        """
        try:
            zpool = self.middleware.call_sync('zfs.pool.query',
                                              [('id', '=', pool['name'])])[0]
        except Exception:
            zpool = None

        if zpool:
            pool['status'] = zpool['status']
            pool['scan'] = zpool['scan']
            pool['topology'] = self._topology(zpool['groups'])
        else:
            pool.update({
                'status': 'OFFLINE',
                'scan': None,
                'topology': None,
            })

        if pool['encrypt'] > 0:
            if zpool:
                pool['is_decrypted'] = True
            else:
                decrypted = True
                for ed in self.middleware.call_sync(
                        'datastore.query', 'storage.encrypteddisk',
                    [('encrypted_volume', '=', pool['id'])]):
                    if not os.path.exists(
                            f'/dev/{ed["encrypted_provider"]}.eli'):
                        decrypted = False
                        break
                pool['is_decrypted'] = decrypted
        else:
            pool['is_decrypted'] = True
        return pool

    @item_method
    @accepts(Int('id', required=False))
    async def get_disks(self, oid=None):
        """
        Get all disks in use by pools.
        If `id` is provided, only the disks from the given pool `id` will be returned.
        """
        filters = []
        if oid:
            filters.append(('id', '=', oid))
        for pool in await self.query(filters):
            if pool['is_decrypted']:
                async for i in await self.middleware.call(
                        'zfs.pool.get_disks', pool['name']):
                    yield i
            else:
                for encrypted_disk in await self.middleware.call(
                        'datastore.query', 'storage.encrypteddisk',
                    [('encrypted_volume', '=', pool['id'])]):
                    disk = {
                        k[len("disk_"):]: v
                        for k, v in encrypted_disk["encrypted_disk"].items()
                    }
                    name = await self.middleware.call("disk.get_name", disk)
                    if os.path.exists(os.path.join("/dev", name)):
                        yield name

    @item_method
    @accepts(Int('id'))
    async def download_encryption_key(self, oid):
        """
        Download encryption key for a given pool `id`.
        """
        pool = await self.query([('id', '=', oid)], {'get': True})
        if not pool['encryptkey']:
            return None

        job_id, url = await self.middleware.call(
            'core.download', 'filesystem.get',
            [os.path.join(self.GELI_KEYPATH, f"{pool['encryptkey']}.key")],
            'geli.key')
        return url

    @private
    def configure_resilver_priority(self):
        """
        Configure resilver priority based on user-selected off-peak hours.
        """
        resilver = self.middleware.call_sync('datastore.config',
                                             'storage.resilver')

        if not resilver['enabled'] or not resilver['weekday']:
            return

        higher_prio = False
        # Use a list (not a lazy map iterator) because membership is checked more than once below.
        weekdays = [int(x) for x in resilver['weekday'].split(',')]
        now = datetime.now()
        now_t = now.time()
        # end overlaps the day
        if resilver['begin'] > resilver['end']:
            if now.isoweekday() in weekdays and now_t >= resilver['begin']:
                higher_prio = True
            else:
                lastweekday = now.isoweekday() - 1
                if lastweekday == 0:
                    lastweekday = 7
                if lastweekday in weekdays and now_t < resilver['end']:
                    higher_prio = True
        # end does not overlap the day
        else:
            if now.isoweekday() in weekdays and now_t >= resilver[
                    'begin'] and now_t < resilver['end']:
                higher_prio = True
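        # Worked example (illustrative): with begin=23:00, end=05:00 and only
        # Monday (1) selected, a check at 02:00 on Tuesday raises the priority,
        # because the previous ISO weekday (Monday) is selected and the current
        # time is still before `end`.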

        if higher_prio:
            resilver_delay = 0
            resilver_min_time_ms = 9000
            scan_idle = 0
        else:
            resilver_delay = 2
            resilver_min_time_ms = 3000
            scan_idle = 50

        sysctl.filter('vfs.zfs.resilver_delay')[0].value = resilver_delay
        sysctl.filter(
            'vfs.zfs.resilver_min_time_ms')[0].value = resilver_min_time_ms
        sysctl.filter('vfs.zfs.scan_idle')[0].value = scan_idle

    @accepts()
    async def import_find(self):
        """
        Get a list of pools available for import with the following details:
        name, guid, status, hostname.
        """

        existing_guids = [
            i['guid'] for i in await self.middleware.call('pool.query')
        ]

        for pool in await self.middleware.call('zfs.pool.find_import'):
            if pool['status'] == 'UNAVAIL':
                continue
            # Exclude pools with same guid as existing pools (in database)
            # It could be the pool is in the database but was exported/detached for some reason
            # See #6808
            if pool['guid'] in existing_guids:
                continue
            entry = {}
            for i in ('name', 'guid', 'status', 'hostname'):
                entry[i] = pool[i]
            yield entry

    @accepts(
        Dict(
            'pool_import',
            Str('guid', required=True),
            Str('name'),
            Str('passphrase', private=True),
            List('devices', items=[Str('device')]),
        ))
    @job(lock='import_pool', pipes=['input'], check_pipes=False)
    async def import_pool(self, job, data):
        """
        Import a pool.

        Errors:
            ENOENT - Pool not found
        """

        pool = None
        for p in await self.middleware.call('zfs.pool.find_import'):
            if p['guid'] == data['guid']:
                pool = p
                break
        if pool is None:
            raise CallError(f'Pool with guid "{data["guid"]}" not found',
                            errno.ENOENT)

        if data['devices']:
            job.check_pipe("input")
            args = [job.pipes.input.r, data['passphrase'], data['devices']]
        else:
            args = []

        await self.middleware.call('notifier.volume_import',
                                   data.get('name') or pool['name'],
                                   data['guid'], *args)
        return True

    @accepts(Str('volume'), Str('fs_type'),
             Dict('fs_options', additional_attrs=True), Str('dst_path'))
    @job(lock=lambda args: 'volume_import', logs=True)
    async def import_disk(self, job, volume, fs_type, fs_options, dst_path):
        job.set_progress(None, description="Mounting")

        src = os.path.join('/var/run/importcopy/tmpdir',
                           os.path.relpath(volume, '/'))

        if os.path.exists(src):
            os.rmdir(src)

        try:
            os.makedirs(src)

            async with KernelModuleContextManager({
                    "msdosfs": "msdosfs_iconv",
                    "ntfs": "fuse"
            }.get(fs_type)):
                async with MountFsContextManager(self.middleware, volume, src,
                                                 fs_type, fs_options, ["ro"]):
                    job.set_progress(None, description="Importing")

                    line = [
                        '/usr/local/bin/rsync', '--info=progress2',
                        '--modify-window=1', '-rltvh', '--no-perms', src + '/',
                        dst_path
                    ]
                    rsync_proc = await Popen(
                        line,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        bufsize=0,
                        preexec_fn=os.setsid,
                    )
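                    # rsync --info=progress2 emits lines like (illustrative):
                    #   "1,234,567  42%  10.5MB/s  0:00:12 (xfr#3, to-chk=5/9)"
                    # i.e. six whitespace-separated fields, where the second
                    # field carries the overall percentage parsed below.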
                    try:
                        progress_buffer = JobProgressBuffer(job)
                        while True:
                            line = await rsync_proc.stdout.readline()
                            job.logs_fd.write(line)
                            if line:
                                try:
                                    line = line.decode("utf-8",
                                                       "ignore").strip()
                                    bits = re.split(r"\s+", line)
                                    if len(bits) == 6 and bits[1].endswith(
                                            "%") and bits[1][:-1].isdigit():
                                        progress_buffer.set_progress(
                                            int(bits[1][:-1]))
                                    elif not line.endswith('/'):
                                        if (line not in [
                                                'sending incremental file list'
                                        ] and 'xfr#' not in line):
                                            progress_buffer.set_progress(
                                                None, extra=line)
                                except Exception:
                                    logger.warning(
                                        'Parsing error in rsync task',
                                        exc_info=True)
                            else:
                                break

                        progress_buffer.flush()
                        await rsync_proc.wait()
                        if rsync_proc.returncode != 0:
                            raise Exception("rsync failed with exit code %r" %
                                            rsync_proc.returncode)
                    except asyncio.CancelledError:
                        rsync_proc.kill()
                        raise

                    job.set_progress(100, description="Done", extra="")
        finally:
            os.rmdir(src)

    @accepts()
    def import_disk_msdosfs_locales(self):
        return [
            locale.strip()
            for locale in subprocess.check_output(["locale", "-a"],
                                                  encoding="utf-8").split("\n")
            if locale.strip()
        ]

    """
    These methods are hacks for the old UI, which supports only one volume import at a time.
    """

    dismissed_import_disk_jobs = set()

    @private
    async def get_current_import_disk_job(self):
        import_jobs = await self.middleware.call(
            'core.get_jobs', [('method', '=', 'pool.import_disk')])
        not_dismissed_import_jobs = [
            job for job in import_jobs
            if job["id"] not in self.dismissed_import_disk_jobs
        ]
        if not_dismissed_import_jobs:
            return not_dismissed_import_jobs[0]

    @private
    async def dismiss_current_import_disk_job(self):
        current_import_job = await self.get_current_import_disk_job()
        if current_import_job:
            self.dismissed_import_disk_jobs.add(current_import_job["id"])