Example #1
class DiskService(CRUDService):
    @filterable
    async def query(self, filters=None, options=None):
        if filters is None:
            filters = []
        if options is None:
            options = {}
        options['prefix'] = 'disk_'
        filters.append(('expiretime', '=', None))
        options['extend'] = 'disk.disk_extend'
        return await self.middleware.call('datastore.query', 'storage.disk',
                                          filters, options)

    @private
    def disk_extend(self, disk):
        disk.pop('enabled', None)
        return disk

    @private
    def get_name(self, disk):
        if disk["multipath_name"]:
            return f"multipath/{disk['multipath_name']}"
        else:
            return disk["name"]

    @accepts(Bool("join_partitions"))
    async def get_unused(self, join_partitions=False):
        """
        Helper method to get all disks that are not in use, either by the boot
        pool or the user pools.
        """
        disks = await self.query([('name', 'nin', await self.get_reserved())])

        if join_partitions:
            for disk in disks:
                disk["partitions"] = await self.__get_partitions(disk)

        return disks

    @accepts(Dict(
        'options',
        Bool('unused', default=False),
    ))
    def get_encrypted(self, options):
        """
        Get all geli providers

        It might be an entire disk or a partition of type freebsd-zfs
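
        Example return entry (hypothetical values):
            {'name': 'da0p2', 'dev': 'gptid/abcd1234', 'disk': 'da0'}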
        """
        providers = []

        disks_blacklist = []
        if options['unused']:
            disks_blacklist += self.middleware.call_sync('disk.get_reserved')

        geom.scan()
        klass_part = geom.class_by_name('PART')
        klass_label = geom.class_by_name('LABEL')
        if not klass_part:
            return providers

        for g in klass_part.geoms:
            for p in g.providers:

                if p.config['type'] != 'freebsd-zfs':
                    continue

                disk = p.geom.consumer.provider.name
                if disk in disks_blacklist:
                    continue

                try:
                    subprocess.run(
                        ['geli', 'dump', p.name],
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL,
                        check=True,
                    )
                except subprocess.CalledProcessError:
                    continue

                dev = None
                if klass_label:
                    for g in klass_label.geoms:
                        if g.name == p.name:
                            dev = g.provider.name
                            break

                if dev is None:
                    dev = p.name

                providers.append({'name': p.name, 'dev': dev, 'disk': disk})

        return providers

    @accepts(
        List('devices', items=[Str('device')]),
        Str('passphrase', private=True),
    )
    @job(pipes=['input'])
    def decrypt(self, job, devices, passphrase=None):
        """
        Decrypt `devices` using uploaded encryption key
        """
        with tempfile.NamedTemporaryFile(dir='/tmp/') as f:
            f.write(job.pipes.input.r.read())
            f.flush()

            if passphrase:
                passf = tempfile.NamedTemporaryFile(mode='w+', dir='/tmp/')
                os.chmod(passf.name, 0o600)
                passf.write(passphrase)
                passf.flush()
                passphrase = passf.name

            failed = []
            for dev in devices:
                try:
                    self.middleware.call_sync(
                        'notifier.geli_attach_single',
                        dev,
                        f.name,
                        passphrase,
                    )
                except Exception:
                    failed.append(dev)

            if passphrase:
                passf.close()

            if failed:
                raise CallError(
                    f'The following devices failed to attach: {", ".join(failed)}'
                )
        return True

    @private
    async def get_reserved(self):
        reserved = [
            i async for i in await self.middleware.call('boot.get_disks')
        ]
        reserved += [
            i async for i in await self.middleware.call('pool.get_disks')
        ]
        reserved += [i async for i in self.__get_iscsi_targets()]
        return reserved

    async def __get_iscsi_targets(self):
        iscsi_target_extent_paths = [
            extent["iscsi_target_extent_path"]
            for extent in await self.middleware.call(
                'datastore.query', 'services.iscsitargetextent', [(
                    'iscsi_target_extent_type', '=', 'Disk')])
        ]
        for disk in await self.middleware.call(
                'datastore.query', 'storage.disk',
            [('disk_identifier', 'in', iscsi_target_extent_paths)]):
            yield disk["disk_name"]

    async def __get_partitions(self, disk):
        partitions = []
        name = await self.middleware.call("disk.get_name", disk)
        for path in glob.glob(f"/dev/{name}[a-fps]*") or [f"/dev/{name}"]:
            info = (await run("/usr/sbin/diskinfo",
                              path)).stdout.decode("utf-8").split("\t")
            if len(info) > 3:
                partitions.append({
                    "path": path,
                    "capacity": int(info[2]),
                })

        return partitions

    async def __camcontrol_list(self):
        """
        Parse camcontrol devlist -v output to gather
        controller id, channel no and driver from a device

        Returns:
            dict(devname) = dict(drv, controller, channel, lun)

        Note (hacky workaround): it is known that at least some HPT
        controllers have a bug in the camcontrol devlist output: with
        multiple controllers, all of them are presented with the same driver
        at index 0, e.g. two hpt27xx0 instead of hpt27xx0 and hpt27xx1.

        What we do here is increase the controller id by its order of
        appearance in the camcontrol output.
        """
        hptctlr = defaultdict(int)

        re_drv_cid = re.compile(r'.* on (?P<drv>.*?)(?P<cid>[0-9]+) bus',
                                re.S | re.M)
        re_tgt = re.compile(
            r'target (?P<tgt>[0-9]+) .*?lun (?P<lun>[0-9]+) .*\((?P<dv1>[a-z]+[0-9]+),(?P<dv2>[a-z]+[0-9]+)\)',
            re.S | re.M)
        drv, cid, tgt, lun, dev, devtmp = (None, ) * 6

        camcontrol = {}
        proc = await Popen(['camcontrol', 'devlist', '-v'],
                           stdout=subprocess.PIPE)
        for line in (await proc.communicate())[0].splitlines():
            line = line.decode()
            if not line.startswith('<'):
                reg = re_drv_cid.search(line)
                if not reg:
                    continue
                drv = reg.group('drv')
                if drv.startswith('hpt'):
                    cid = hptctlr[drv]
                    hptctlr[drv] += 1
                else:
                    cid = reg.group('cid')
            else:
                reg = re_tgt.search(line)
                if not reg:
                    continue
                tgt = reg.group('tgt')
                lun = reg.group('lun')
                dev = reg.group('dv1')
                devtmp = reg.group('dv2')
                if dev.startswith('pass'):
                    dev = devtmp
                camcontrol[dev] = {
                    'drv': drv,
                    'controller': int(cid),
                    'channel': int(tgt),
                    'lun': int(lun)
                }
        return camcontrol

    async def __get_twcli(self, controller):

        re_port = re.compile(r'^p(?P<port>\d+).*?\bu(?P<unit>\d+)\b',
                             re.S | re.M)
        proc = await Popen(
            ['/usr/local/sbin/tw_cli', f'/c{controller}', 'show'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        output = (await proc.communicate())[0].decode()

        units = {}
        for port, unit in re_port.findall(output):
            units[int(unit)] = int(port)
        return units

    async def __get_smartctl_args(self, devname):
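        # Example (hypothetical): for a disk behind an arcmsr controller with
        # controller=0, channel=1, lun=0, the args built below would be
        # ['/dev/arcmsr0', '-d', 'areca,9'].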
        args = [f'/dev/{devname}']
        camcontrol = await self.__camcontrol_list()
        info = camcontrol.get(devname)
        if info is not None:
            if info.get('drv') == 'rr274x_3x':
                channel = info['channel'] + 1
                if channel > 16:
                    channel -= 16
                elif channel > 8:
                    channel -= 8
                args = [
                    '/dev/%s' % info['drv'], '-d',
                    'hpt,%d/%d' % (info['controller'] + 1, channel)
                ]
            elif info.get('drv').startswith('arcmsr'):
                args = [
                    '/dev/%s%d' % (info['drv'], info['controller']), '-d',
                    'areca,%d' % (info['lun'] + 1 + (info['channel'] * 8), )
                ]
            elif info.get('drv').startswith('hpt'):
                args = [
                    '/dev/%s' % info['drv'], '-d',
                    'hpt,%d/%d' % (info['controller'] + 1, info['channel'] + 1)
                ]
            elif info.get('drv') == 'ciss':
                args = [
                    '/dev/%s%d' % (info['drv'], info['controller']), '-d',
                    'cciss,%d' % (info['channel'], )
                ]
            elif info.get('drv') == 'twa':
                twcli = await self.__get_twcli(info['controller'])
                args = [
                    '/dev/%s%d' % (info['drv'], info['controller']), '-d',
                    '3ware,%d' % (twcli.get(info['channel'], -1), )
                ]
        return args

    @private
    async def toggle_smart_off(self, devname):
        args = await self.__get_smartctl_args(devname)
        await run('/usr/local/sbin/smartctl',
                  '--smart=off',
                  *args,
                  check=False)

    @private
    async def toggle_smart_on(self, devname):
        args = await self.__get_smartctl_args(devname)
        await run('/usr/local/sbin/smartctl', '--smart=on', *args, check=False)

    @private
    async def serial_from_device(self, name):
        args = await self.__get_smartctl_args(name)
        p1 = await Popen(['smartctl', '-i'] + args, stdout=subprocess.PIPE)
        output = (await p1.communicate())[0].decode()
        search = re.search(r'Serial Number:\s+(?P<serial>.+)', output, re.I)
        if search:
            return search.group('serial')
        return None

    @private
    @accepts(Str('name'))
    async def device_to_identifier(self, name):
        """
        Given a device `name` (e.g. da0) returns a unique identifier string
        for this device.
        This identifier is in the form of {type}string, "type" can be one of
        the following:
          - serial_lunid - for disk serial concatenated with the lunid
          - serial - disk serial
          - uuid - uuid of a ZFS GPT partition
          - label - label name from geom label
          - devicename - name of the device if any other could not be used/found

        Returns:
            str - identifier
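
        Example (hypothetical values): for da0 reporting serial 'WD-ABC123'
        and lunid '5000cca123456789' this returns
        '{serial_lunid}WD-ABC123_5000cca123456789'.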
        """
        await self.middleware.run_in_thread(geom.scan)

        g = geom.geom_by_name('DISK', name)
        if g and g.provider.config.get('ident'):
            serial = g.provider.config['ident']
            lunid = g.provider.config.get('lunid')
            if lunid:
                return f'{{serial_lunid}}{serial}_{lunid}'
            return f'{{serial}}{serial}'

        serial = await self.serial_from_device(name)
        if serial:
            return f'{{serial}}{serial}'

        klass = geom.class_by_name('PART')
        if klass:
            for g in klass.geoms:
                for p in g.providers:
                    if p.name == name:
                        # freebsd-zfs partition
                        if p.config[
                                'rawtype'] == '516e7cba-6ecf-11d6-8ff8-00022d09712b':
                            return f'{{uuid}}{p.config["rawuuid"]}'

        g = geom.geom_by_name('LABEL', name)
        if g:
            return f'{{label}}{g.provider.name}'

        g = geom.geom_by_name('DEV', name)
        if g:
            return f'{{devicename}}{name}'

        return ''

    @private
    @accepts(Str('name'))
    async def sync(self, name):
        """
        Syncs a disk `name` with the database cache.
        """
        # Skip syncing disks on the backup node
        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('notifier.failover_licensed')
                and await self.middleware.call('notifier.failover_status')
                == 'BACKUP'):
            return

        # Do not sync geom classes like multipath/hast/etc
        if name.find("/") != -1:
            return

        disks = list((await self.middleware.call('device.get_info',
                                                 'DISK')).keys())

        # Abort if the disk is not recognized as an available disk
        if name not in disks:
            return
        ident = await self.device_to_identifier(name)
        qs = await self.middleware.call('datastore.query', 'storage.disk',
                                        [('disk_identifier', '=', ident)],
                                        {'order_by': ['disk_expiretime']})
        if ident and qs:
            disk = qs[0]
            new = False
        else:
            new = True
            qs = await self.middleware.call('datastore.query', 'storage.disk',
                                            [('disk_name', '=', name)])
            for i in qs:
                i['disk_expiretime'] = datetime.utcnow() + timedelta(
                    days=DISK_EXPIRECACHE_DAYS)
                await self.middleware.call('datastore.update', 'storage.disk',
                                           i['disk_identifier'], i)
            disk = {'disk_identifier': ident}
        disk.update({'disk_name': name, 'disk_expiretime': None})

        await self.middleware.run_in_thread(geom.scan)
        g = geom.geom_by_name('DISK', name)
        if g:
            if g.provider.config['ident']:
                disk['disk_serial'] = g.provider.config['ident']
            if g.provider.mediasize:
                disk['disk_size'] = g.provider.mediasize
        if not disk.get('disk_serial'):
            disk['disk_serial'] = await self.serial_from_device(name) or ''
        reg = RE_DSKNAME.search(name)
        if reg:
            disk['disk_subsystem'] = reg.group(1)
            disk['disk_number'] = int(reg.group(2))
        if not new:
            await self.middleware.call('datastore.update', 'storage.disk',
                                       disk['disk_identifier'], disk)
        else:
            disk['disk_identifier'] = await self.middleware.call(
                'datastore.insert', 'storage.disk', disk)

        # FIXME: use a truenas middleware plugin
        await self.middleware.call('notifier.sync_disk_extra',
                                   disk['disk_identifier'], False)

    @private
    @accepts()
    async def sync_all(self):
        """
        Synchronize all disks with the cache in the database.
        """
        # Skip syncing disks on the backup node
        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('notifier.failover_licensed')
                and await self.middleware.call('notifier.failover_status')
                == 'BACKUP'):
            return

        sys_disks = list((await self.middleware.call('device.get_info',
                                                     'DISK')).keys())

        seen_disks = {}
        serials = []
        await self.middleware.run_in_thread(geom.scan)
        for disk in (await
                     self.middleware.call('datastore.query', 'storage.disk',
                                          [],
                                          {'order_by': ['disk_expiretime']})):

            name = await self.middleware.call('notifier.identifier_to_device',
                                              disk['disk_identifier'])
            if not name or name in seen_disks:
                # If we can't translate the identifier to a device, give up.
                # If the name has already been seen once then we are probably
                # dealing with multipath here.
                if not disk['disk_expiretime']:
                    disk['disk_expiretime'] = datetime.utcnow() + timedelta(
                        days=DISK_EXPIRECACHE_DAYS)
                    await self.middleware.call('datastore.update',
                                               'storage.disk',
                                               disk['disk_identifier'], disk)
                elif disk['disk_expiretime'] < datetime.utcnow():
                    # Disk expire time has surpassed, go ahead and remove it
                    await self.middleware.call('datastore.delete',
                                               'storage.disk',
                                               disk['disk_identifier'])
                continue
            else:
                disk['disk_expiretime'] = None
                disk['disk_name'] = name

            reg = RE_DSKNAME.search(name)
            if reg:
                disk['disk_subsystem'] = reg.group(1)
                disk['disk_number'] = int(reg.group(2))
            serial = ''
            g = geom.geom_by_name('DISK', name)
            if g:
                if g.provider.config['ident']:
                    serial = disk['disk_serial'] = g.provider.config['ident']
                serial += g.provider.config.get('lunid') or ''
                if g.provider.mediasize:
                    disk['disk_size'] = g.provider.mediasize
            if not disk.get('disk_serial'):
                serial = disk['disk_serial'] = await self.serial_from_device(
                    name) or ''

            if serial:
                serials.append(serial)

            # If for some reason the disk is not identified as a system disk,
            # mark it to expire.
            if name not in sys_disks and not disk['disk_expiretime']:
                disk['disk_expiretime'] = datetime.utcnow() + timedelta(
                    days=DISK_EXPIRECACHE_DAYS)
            await self.middleware.call('datastore.update', 'storage.disk',
                                       disk['disk_identifier'], disk)

            # FIXME: use a truenas middleware plugin
            await self.middleware.call('notifier.sync_disk_extra',
                                       disk['disk_identifier'], False)
            seen_disks[name] = disk

        for name in sys_disks:
            if name not in seen_disks:
                disk_identifier = await self.device_to_identifier(name)
                qs = await self.middleware.call(
                    'datastore.query', 'storage.disk',
                    [('disk_identifier', '=', disk_identifier)])
                if qs:
                    new = False
                    disk = qs[0]
                else:
                    new = True
                    disk = {'disk_identifier': disk_identifier}
                disk['disk_name'] = name
                serial = ''
                g = geom.geom_by_name('DISK', name)
                if g:
                    if g.provider.config['ident']:
                        serial = disk['disk_serial'] = g.provider.config[
                            'ident']
                    serial += g.provider.config.get('lunid') or ''
                    if g.provider.mediasize:
                        disk['disk_size'] = g.provider.mediasize
                if not disk.get('disk_serial'):
                    serial = disk[
                        'disk_serial'] = await self.serial_from_device(name
                                                                       ) or ''
                if serial:
                    if serial in serials:
                        # Probably dealing with multipath here, do not add another
                        continue
                    else:
                        serials.append(serial)
                reg = RE_DSKNAME.search(name)
                if reg:
                    disk['disk_subsystem'] = reg.group(1)
                    disk['disk_number'] = int(reg.group(2))

                if not new:
                    await self.middleware.call('datastore.update',
                                               'storage.disk',
                                               disk['disk_identifier'], disk)
                else:
                    disk['disk_identifier'] = await self.middleware.call(
                        'datastore.insert', 'storage.disk', disk)
                # FIXME: use a truenas middleware plugin
                await self.middleware.call('notifier.sync_disk_extra',
                                           disk['disk_identifier'], True)

    async def __multipath_create(self, name, consumers, mode=None):
        """
        Create an Active/Passive GEOM_MULTIPATH provider
        with name ``name`` using ``consumers`` as the consumers for it

        Modes:
            A - Active/Active
            R - Active/Read
            None - Active/Passive

        Returns:
            True in case the label succeeded and False otherwise
        """
        cmd = ["/sbin/gmultipath", "label", name] + consumers
        if mode:
            cmd.insert(2, f'-{mode}')
        p1 = await Popen(cmd, stdout=subprocess.PIPE)
        if (await p1.wait()) != 0:
            return False
        return True

    async def __multipath_next(self):
        """
        Find out the next available name for a multipath named diskX
        where X is an increasing value starting from 1

        Returns:
            The string of the multipath name to be created
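
        Example: with existing multipaths disk1, disk2 and disk4, the lowest
        unused number is chosen and 'disk3' is returned.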
        """
        await self.middleware.run_in_thread(geom.scan)
        numbers = sorted([
            int(RE_MPATH_NAME.search(g.name).group(1))
            for g in geom.class_by_name('MULTIPATH').geoms
            if RE_MPATH_NAME.match(g.name)
        ])
        if not numbers:
            numbers = [0]
        for number in range(1, numbers[-1] + 2):
            if number not in numbers:
                break
        else:
            raise ValueError('Could not find multipaths')
        return f'disk{number}'

    @private
    @accepts()
    async def multipath_sync(self):
        """
        Synchronize multipath disks

        Every distinct GEOM_DISK that shares an ident (aka disk serial)
        in conjunction with the lunid is considered a multipath and will be
        handled by GEOM_MULTIPATH.

        If the disk is not currently in use by some Volume or iSCSI Disk Extent
        then a gmultipath is automatically created and will be available for use.
        """

        await self.middleware.run_in_thread(geom.scan)

        mp_disks = []
        for g in geom.class_by_name('MULTIPATH').geoms:
            for c in g.consumers:
                p_geom = c.provider.geom
                # For now just DISK is allowed
                if p_geom.clazz.name != 'DISK':
                    self.logger.warn(
                        "A consumer that is not a disk (%s) is part of a "
                        "MULTIPATH, currently unsupported by middleware",
                        p_geom.clazz.name)
                    continue
                mp_disks.append(p_geom.name)

        reserved = await self.get_reserved()

        is_freenas = await self.middleware.call('system.is_freenas')

        serials = defaultdict(list)
        active_active = []
        for g in geom.class_by_name('DISK').geoms:
            if not RE_DA.match(
                    g.name) or g.name in reserved or g.name in mp_disks:
                continue
            if not is_freenas:
                descr = g.provider.config.get('descr') or ''
                if (descr == 'STEC ZeusRAM' or descr.startswith('VIOLIN')
                        or descr.startswith('3PAR')):
                    active_active.append(g.name)
            serial = ''
            v = g.provider.config.get('ident')
            if v:
                serial = v
            v = g.provider.config.get('lunid')
            if v:
                serial += v
            if not serial:
                continue
            size = g.provider.mediasize
            serials[(serial, size)].append(g.name)
            serials[(serial, size)].sort(key=lambda x: int(x[2:]))

        disks_pairs = [disks for disks in list(serials.values())]
        disks_pairs.sort(key=lambda x: int(x[0][2:]))

        # Mode is Active/Passive for FreeNAS
        mode = None if is_freenas else 'R'
        for disks in disks_pairs:
            if not len(disks) > 1:
                continue
            name = await self.__multipath_next()
            await self.__multipath_create(
                name, disks, 'A' if disks[0] in active_active else mode)

        # Scan again to take new multipaths into account
        await self.middleware.run_in_thread(geom.scan)
        mp_ids = []
        for g in geom.class_by_name('MULTIPATH').geoms:
            _disks = []
            for c in g.consumers:
                p_geom = c.provider.geom
                # For now just DISK is allowed
                if p_geom.clazz.name != 'DISK':
                    continue
                _disks.append(p_geom.name)

            qs = await self.middleware.call(
                'datastore.query', 'storage.disk', [
                    [
                        'OR',
                        [
                            ['disk_name', 'in', _disks],
                            ['disk_multipath_member', 'in', _disks],
                        ]
                    ],
                ])
            if qs:
                diskobj = qs[0]
                mp_ids.append(diskobj['disk_identifier'])
                update = False  # Make sure to not update if nothing changed
                if diskobj['disk_multipath_name'] != g.name:
                    update = True
                    diskobj['disk_multipath_name'] = g.name
                if diskobj['disk_name'] in _disks:
                    _disks.remove(diskobj['disk_name'])
                if _disks and diskobj['disk_multipath_member'] != _disks[-1]:
                    update = True
                    diskobj['disk_multipath_member'] = _disks.pop()
                if update:
                    await self.middleware.call('datastore.update',
                                               'storage.disk',
                                               diskobj['disk_identifier'],
                                               diskobj)

        # Update all disks which were not identified as MULTIPATH, resetting attributes
        for disk in (await self.middleware.call(
                'datastore.query', 'storage.disk',
            [('disk_identifier', 'nin', mp_ids)])):
            if disk['disk_multipath_name'] or disk['disk_multipath_member']:
                disk['disk_multipath_name'] = ''
                disk['disk_multipath_member'] = ''
                await self.middleware.call('datastore.update', 'storage.disk',
                                           disk['disk_identifier'], disk)

    @private
    async def swaps_configure(self):
        """
        Configures swap partitions in the system.
        We try to mirror all available swap partitions to avoid a system
        crash in case one of them dies.
        """
        await self.middleware.run_in_thread(geom.scan)

        used_partitions = set()
        swap_devices = []
        klass = geom.class_by_name('MIRROR')
        if klass:
            for g in klass.geoms:
                # Skip gmirror that is not swap*
                if not g.name.startswith('swap') or g.name.endswith('.sync'):
                    continue
                consumers = list(g.consumers)
                # If the mirror is degraded let's remove it and make a new pair
                if len(consumers) == 1:
                    c = consumers[0]
                    await self.swaps_remove_disks([c.provider.geom.name])
                else:
                    swap_devices.append(f'mirror/{g.name}')
                    for c in consumers:
                        # Add all partitions used in swap, removing the .eli
                        # suffix (str.strip('.eli') would strip characters,
                        # not the suffix)
                        name = c.provider.name
                        if name.endswith('.eli'):
                            name = name[:-len('.eli')]
                        used_partitions.add(name)

        klass = geom.class_by_name('PART')
        if not klass:
            return

        # Get all partitions of swap type, indexed by size
        swap_partitions_by_size = defaultdict(list)
        for g in klass.geoms:
            for p in g.providers:
                # if swap partition
                if p.config[
                        'rawtype'] == '516e7cb5-6ecf-11d6-8ff8-00022d09712b':
                    if p.name not in used_partitions:
                        # Try to save a core dump from that.
                        # Only try savecore if the partition is not already in use
                        # to avoid errors in the console (#27516)
                        await run('savecore',
                                  '-z',
                                  '-m',
                                  '5',
                                  '/data/crash/',
                                  f'/dev/{p.name}',
                                  check=False)
                        swap_partitions_by_size[p.mediasize].append(p.name)

        dumpdev = False
        unused_partitions = []
        for size, partitions in swap_partitions_by_size.items():
            # If we have only one partition add it to unused_partitions list
            if len(partitions) == 1:
                unused_partitions += partitions
                continue

            for i in range(int(len(partitions) / 2)):
                if len(swap_devices) > MIRROR_MAX:
                    break
                part_a, part_b = partitions[0:2]
                partitions = partitions[2:]
                if not dumpdev:
                    dumpdev = await dempdev_configure(part_a)
                try:
                    name = new_swap_name()
                    if name is None:
                        # Which means maximum has been reached and we can stop
                        break
                    await run('gmirror', 'create', '-b', 'prefer', name,
                              part_a, part_b)
                except Exception:
                    self.logger.warn(f'Failed to create gmirror {name}',
                                     exc_info=True)
                    continue
                swap_devices.append(f'mirror/{name}')
                # Add remaining partitions to unused list
                unused_partitions += partitions

        # If we could not make even a single swap mirror, add the first unused
        # partition as a swap device
        if not swap_devices and unused_partitions:
            if not dumpdev:
                dumpdev = await dempdev_configure(unused_partitions[0])
            swap_devices.append(unused_partitions[0])

        for name in swap_devices:
            if not os.path.exists(f'/dev/{name}.eli'):
                await run('geli', 'onetime', name)
            await run('swapon', f'/dev/{name}.eli', check=False)

        return swap_devices

    @private
    async def swaps_remove_disks(self, disks):
        """
        Remove the given disks (e.g. ["da0", "da1"]) from swap.
        Each swap partition is turned off, removed from its gmirror (if one
        exists) and its geli provider is detached.
        """
        await self.middleware.run_in_thread(geom.scan)
        providers = {}
        for disk in disks:
            partgeom = geom.geom_by_name('PART', disk)
            if not partgeom:
                continue
            for p in partgeom.providers:
                if p.config[
                        'rawtype'] == '516e7cb5-6ecf-11d6-8ff8-00022d09712b':
                    providers[p.id] = p
                    break

        if not providers:
            return

        klass = geom.class_by_name('MIRROR')
        if not klass:
            return

        mirrors = set()
        for g in klass.geoms:
            for c in g.consumers:
                if c.provider.id in providers:
                    mirrors.add(g.name)
                    del providers[c.provider.id]

        for name in mirrors:
            await run('swapoff', f'/dev/mirror/{name}.eli')
            if os.path.exists(f'/dev/mirror/{name}.eli'):
                await run('geli', 'detach', f'mirror/{name}.eli')
            await run('gmirror', 'destroy', name)

        for p in providers.values():
            await run('swapoff', f'/dev/{p.name}.eli')

    @private
    async def wipe_quick(self, dev, size=None):
        """
        Perform a quick wipe of a disk `dev` by zeroing its first and last 32MB
        """
        # If the size is too small, let's just skip it for now.
        # In the future we can adjust dd size
        if size and size < 33554432:  # 32MB
            return
        await run('dd', 'if=/dev/zero', f'of=/dev/{dev}', 'bs=1m', 'count=32')
        try:
            cp = await run('diskinfo', dev)
            size = int(
                int(re.sub(r'\s+', ' ', cp.stdout.decode()).split()[2]) /
                (1024))
        except subprocess.CalledProcessError:
            self.logger.error(f'Unable to determine size of {dev}')
        else:
            # This will fail when the end of the device is reached
            await run('dd',
                      'if=/dev/zero',
                      f'of=/dev/{dev}',
                      'bs=1m',
                      f'oseek={int(size / 1024) - 32}',
                      check=False)

    @accepts(Str('dev'), Str('mode', enum=['QUICK', 'FULL', 'FULL_RANDOM']))
    @job(lock=lambda args: args[0])
    async def wipe(self, job, dev, mode):
        """
        Performs a wipe of a disk `dev`.
        It can be of the following modes:
          - QUICK: clean the first and last few megabytes of every partition and disk
          - FULL: write the whole disk with zeros
          - FULL_RANDOM: write the whole disk with random bytes
        """
        await self.swaps_remove_disks([dev])

        # First do a quick wipe of every partition to clean things like zfs labels
        if mode == 'QUICK':
            await self.middleware.run_in_thread(geom.scan)
            klass = geom.class_by_name('PART')
            for g in klass.xml.findall(f'./geom[name=\'{dev}\']'):
                for p in g.findall('./provider'):
                    size = p.find('./mediasize')
                    if size is not None:
                        try:
                            size = int(size.text)
                        except ValueError:
                            size = None
                    name = p.find('./name')
                    await self.wipe_quick(name.text, size=size)

        await run('gpart', 'destroy', '-F', f'/dev/{dev}', check=False)

        # Wipe out the partition table by doing an additional iteration of create/destroy
        await run('gpart', 'create', '-s', 'gpt', f'/dev/{dev}')
        await run('gpart', 'destroy', '-F', f'/dev/{dev}')

        if mode == 'QUICK':
            await self.wipe_quick(dev)
        else:
            cp = await run('diskinfo', dev)
            size = int(re.sub(r'\s+', ' ', cp.stdout.decode()).split()[2])

            proc = await Popen([
                'dd',
                'if=/dev/{}'.format('zero' if mode == 'FULL' else 'random'),
                f'of=/dev/{dev}',
                'bs=1m',
            ],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)

            async def dd_wait():
                while True:
                    if proc.returncode is not None:
                        break
                    os.kill(proc.pid, signal.SIGINFO)
                    await asyncio.sleep(1)

            asyncio.ensure_future(dd_wait())

            while True:
                line = await proc.stderr.readline()
                if line == b'':
                    break
                line = line.decode()
                reg = RE_DD.search(line)
                if reg:
                    job.set_progress(int(reg.group(1)) / size,
                                     extra={'speed': int(reg.group(2))})

        await self.sync(dev)
Example #2
class FilesystemService(Service):
    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
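
          Example entry (hypothetical values):
            {'name': 'file.txt', 'path': '/mnt/tank/file.txt',
             'realpath': '/mnt/tank/file.txt', 'type': 'FILE', 'size': 1024,
             'mode': 33188, 'uid': 0, 'gid': 0, 'acl': False}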
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_symlink():
                etype = 'SYMLINK'
            elif entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path)
                            if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': not self.acl_is_trivial(data["realpath"]),
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.
        """
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = not self.middleware.call_sync(
            'filesystem.acl_is_trivial', path)

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=2048000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options=None):
        """
        Simplified file receiving method for small files.

        `content` must be the base64-encoded file content.
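
        Example (hypothetical): to write b'hello', pass
        content=base64.b64encode(b'hello').decode(), i.e. 'aGVsbG8='.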
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options=None):
        """
        Get the contents of a file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        options = options or {}
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(
                options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @job(pipes=["input"])
    async def put(self, job, path, options=None):
        """
        Job to put contents to `path`.
        """
        options = options or {}
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            statfs = bsd.statfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)
        return {
            **statfs.__getstate__(),
            'total_bytes': statfs.total_blocks * statfs.blocksize,
            'free_bytes': statfs.free_blocks * statfs.blocksize,
            'avail_bytes': statfs.avail_blocks * statfs.blocksize,
        }

    def __convert_to_basic_permset(self, permset):
        """
        Convert "advanced" ACL permset format to basic format using
        bitwise operation and constants defined in py-bsd/bsd/acl.pyx,
        py-bsd/defs.pxd and acl.h.

        If the advanced ACL can't be converted without losing
        information, we return 'OTHER'.

        Reverse process converts the constant's value to a dictionary
        using a bitwise operation.
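
        Example (sketch): a permset with every NFSv4 permission bit set maps
        to 'FULL_CONTROL'; a combination matching no basic permset yields
        'OTHER'.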
        """
        perm = 0
        for k, v in permset.items():
            if v:
                perm |= acl.NFS4Perm[k]

        try:
            SimplePerm = (acl.NFS4BasicPermset(perm)).name
        except Exception:
            SimplePerm = 'OTHER'

        return SimplePerm

    def __convert_to_basic_flagset(self, flagset):
        flags = 0
        for k, v in flagset.items():
            if k == "INHERITED":
                continue
            if v:
                flags |= acl.NFS4Flag[k]

        try:
            SimpleFlag = (acl.NFS4BasicFlagset(flags)).name
        except Exception:
            SimpleFlag = 'OTHER'

        return SimpleFlag

    def __convert_to_adv_permset(self, basic_perm):
        permset = {}
        perm_mask = acl.NFS4BasicPermset[basic_perm].value
        for name, member in acl.NFS4Perm.__members__.items():
            if perm_mask & member.value:
                permset.update({name: True})
            else:
                permset.update({name: False})

        return permset

    def __convert_to_adv_flagset(self, basic_flag):
        flagset = {}
        flag_mask = acl.NFS4BasicFlagset[basic_flag].value
        for name, member in acl.NFS4Flag.__members__.items():
            if flag_mask & member.value:
                flagset.update({name: True})
            else:
                flagset.update({name: False})

        return flagset

    def _winacl(self, path, action, uid, gid, options):
        chroot_dir = os.path.dirname(path)
        target = os.path.basename(path)
        winacl = subprocess.run([
            '/usr/local/bin/winacl', '-a', action, '-O',
            str(uid), '-G',
            str(gid), '-rx' if options['traverse'] else '-r', '-c', chroot_dir,
            '-p', target
        ],
                                check=False,
                                capture_output=True)
        if winacl.returncode != 0:
            raise CallError(
                f"Winacl {action} on path {path} failed with error: [{winacl.stderr.decode().strip()}]"
            )

    def _common_perm_path_validate(self, path):
        if not os.path.exists(path):
            raise CallError(f"Path not found: {path}", errno.ENOENT)

        if not os.path.realpath(path).startswith('/mnt/'):
            raise CallError(
                f"Changing permissions on paths outside of /mnt is not permitted: {path}",
                errno.EPERM)

        if os.path.realpath(path) in [
                x['path'] for x in self.middleware.call_sync('pool.query')
        ]:
            raise CallError(
                f"Changing permissions of root level dataset is not permitted: {path}",
                errno.EPERM)

    @accepts(Str('path'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules, or if the path does not support NFSv4 ACLs (for example
        a path on a tmpfs filesystem).
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        # On FreeBSD, pathconf name 64 is _PC_ACL_NFS4; a falsy result means
        # the underlying filesystem does not support NFSv4 ACLs.
        has_nfs4_acl_support = os.pathconf(path, 64)
        if not has_nfs4_acl_support:
            return True

        return acl.ACL(file=path).is_trivial

    @accepts(
        Dict(
            'filesystem_ownership', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict('options', Bool('recursive', default=False),
                 Bool('traverse', default=False))))
    @job(lock="perm_change")
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
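
        Example payload (hypothetical values):
            {'path': '/mnt/tank/share', 'uid': 1001, 'gid': None,
             'options': {'recursive': True, 'traverse': False}}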
        """
        job.set_progress(0, 'Preparing to change owner.')

        self._common_perm_path_validate(data['path'])

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            job.set_progress(100, 'Finished changing owner.')
            os.chown(data['path'], uid, gid)
        else:
            job.set_progress(10,
                             f'Recursively changing owner of {data["path"]}.')
            self._winacl(data['path'], 'chown', uid, gid, options)
            job.set_progress(100, 'Finished changing owner.')

    @accepts(
        Dict(
            'filesystem_permission', Str('path', required=True),
            UnixPerm('mode', null=True), Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            Dict(
                'options',
                Bool('stripacl', default=False),
                Bool('recursive', default=False),
                Bool('traverse', default=False),
            )))
    @job(lock="perm_change")
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        self._common_perm_path_validate(data['path'])

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial',
                                                   data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
                errno.EINVAL)

        if mode is not None:
            mode = int(mode, 8)

        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        # -1 means "leave unchanged"; a plain truthiness test would skip a
        # chown to uid/gid 0 and run a pointless chown when both are -1.
        if uid != -1 or gid != -1:
            os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        action = 'clone' if mode else 'strip'
        job.set_progress(
            10, f'Recursively setting permissions on {data["path"]}.')
        self._winacl(data['path'], action, uid, gid, options)
        job.set_progress(100, 'Finished setting permissions.')

    @accepts()
    async def default_acl_choices(self):
        """
        Get list of default ACL types.
        """
        acl_choices = []
        for x in ACLDefault:
            if x.value['visible']:
                acl_choices.append(x.name)

        return acl_choices

    @accepts(
        Str('acl_type', default='OPEN', enum=[x.name for x in ACLDefault]),
        Str('share_type', default='NONE', enum=['NONE', 'AFP', 'SMB', 'NFS']),
    )
    async def get_default_acl(self, acl_type, share_type):
        """
        Returns a default ACL depending on the usage specified by `acl_type`.
        If an admin group is defined, then an entry granting it full control will
        be placed at the top of the ACL. Optionally, a `share_type` may be passed
        to get a share-specific template ACL.
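
        Example entry appended when `share_type` is 'SMB' (grants the builtin
        users group MODIFY access; the id is resolved from SMBBuiltin):
            {'tag': 'GROUP', 'id': <builtin users gid>,
             'perms': {'BASIC': 'MODIFY'}, 'flags': {'BASIC': 'INHERIT'},
             'type': 'ALLOW'}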
        """
        acl = []
        admin_group = (await self.middleware.call('smb.config'))['admin_group']
        if acl_type == 'HOME' and (await self.middleware.call(
                'activedirectory.get_state')) == 'HEALTHY':
            acl_type = 'DOMAIN_HOME'
        if admin_group:
            acl.append({
                'tag': 'GROUP',
                'id': (await self.middleware.call('dscache.get_uncached_group',
                                                  admin_group))['gr_gid'],
                'perms': {'BASIC': 'FULL_CONTROL'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            })
        if share_type == 'SMB':
            acl.append({
                'tag': 'GROUP',
                'id': int(SMBBuiltin['USERS'].value[1][9:]),
                'perms': {'BASIC': 'MODIFY'},
                'flags': {'BASIC': 'INHERIT'},
                'type': 'ALLOW'
            })
        acl.extend((ACLDefault[acl_type].value)['acl'])

        return acl

    def _is_inheritable(self, flags):
        """
        Takes ACE flags and return True if any inheritance bits are set.
        """
        inheritance_flags = [
            'FILE_INHERIT', 'DIRECTORY_INHERIT', 'NO_PROPAGATE_INHERIT',
            'INHERIT_ONLY'
        ]
        for i in inheritance_flags:
            if flags.get(i):
                return True

        return False

    @private
    def canonicalize_acl_order(self, acl):
        """
        Convert flags to advanced, then separate the ACL into two lists: one for ACEs that have been inherited,
        one for ACEs that have not been inherited. Non-inherited ACEs take precedence
        and so they are placed first in the finalized combined list. Within each list, the
        ACEs are ordered according to the following:

        1) Deny ACEs that apply to the object itself (NOINHERIT)

        2) Deny ACEs that apply to a subobject of the object (INHERIT)

        3) Allow ACEs that apply to the object itself (NOINHERIT)

        4) Allow ACEs that apply to a subobject of the object (INHERIT)

        See http://docs.microsoft.com/en-us/windows/desktop/secauthz/order-of-aces-in-a-dacl

        The "INHERITED" bit is stripped in filesystem.getacl when generating a BASIC flag type.
        It is best practice to use a non-simplified ACL for canonicalization.
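
        Example (hypothetical ACEs): given [inherited ALLOW, DENY, ALLOW],
        the canonicalized order is [DENY, ALLOW, inherited ALLOW]:
        non-inherited entries first, with DENY before ALLOW within each group.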
        """
        inherited_aces = []
        final_acl = []
        non_inherited_aces = []
        for entry in acl:
            if 'BASIC' in entry['flags']:
                entry['flags'] = self.__convert_to_adv_flagset(
                    entry['flags']['BASIC'])
            if entry['flags'].get('INHERITED'):
                inherited_aces.append(entry)
            else:
                non_inherited_aces.append(entry)

        if inherited_aces:
            inherited_aces = sorted(
                inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        if non_inherited_aces:
            non_inherited_aces = sorted(
                non_inherited_aces,
                key=lambda x:
                (x['type'] == 'ALLOW', self._is_inheritable(x['flags'])),
            )
        final_acl = non_inherited_aces + inherited_aces
        return final_acl

    @accepts(
        Str('path'),
        Bool('simplified', default=True),
    )
    def getacl(self, path, simplified=True):
        """
        Return ACL of a given path.

        Simplified returns a shortened form of the ACL permset and flags

        `TRAVERSE` sufficient rights to traverse a directory, but not read contents.

        `READ` sufficient rights to traverse a directory, and read file contents.

        `MODIFY` sufficient rights to traverse, read, write, and modify a file. Equivalent to modify_set.

        `FULL_CONTROL` all permissions.

        If the permissions do not fit within one of the pre-defined simplified permission types, then
        the full ACL entry will be returned.

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled. We also remove it here to avoid confusion.
        """
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        stat = os.stat(path)

        a = acl.ACL(file=path)
        fs_acl = a.__getstate__()

        if not simplified:
            advanced_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': entry['perms'],
                    'flags': entry['flags'],
                }
                if ace['tag'] == 'everyone@' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    continue

                advanced_acl.append(ace)

            return {
                'uid': stat.st_uid,
                'gid': stat.st_gid,
                'acl': advanced_acl
            }

        if simplified:
            simple_acl = []
            for entry in fs_acl:
                ace = {
                    'tag': (acl.ACLWho[entry['tag']]).value,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': {
                        'BASIC':
                        self.__convert_to_basic_permset(entry['perms'])
                    },
                    'flags': {
                        'BASIC':
                        self.__convert_to_basic_flagset(entry['flags'])
                    },
                }
                if ace['tag'] == 'everyone@' and ace['perms'][
                        'BASIC'] == 'NOPERMS':
                    continue

                for key in ['perms', 'flags']:
                    if ace[key]['BASIC'] == 'OTHER':
                        ace[key] = entry[key]

                simple_acl.append(ace)

            return {'uid': stat.st_uid, 'gid': stat.st_gid, 'acl': simple_acl}

    @accepts(
        Dict(
            'filesystem_acl', Str('path', required=True),
            Int('uid', null=True, default=None),
            Int('gid', null=True, default=None),
            List('dacl',
                 items=[
                     Dict(
                         'aclentry',
                         Str('tag',
                             enum=[
                                 'owner@', 'group@', 'everyone@', 'USER',
                                 'GROUP'
                             ]),
                         Int('id', null=True),
                         Str('type', enum=['ALLOW', 'DENY']),
                         Dict(
                             'perms',
                             Bool('READ_DATA'),
                             Bool('WRITE_DATA'),
                             Bool('APPEND_DATA'),
                             Bool('READ_NAMED_ATTRS'),
                             Bool('WRITE_NAMED_ATTRS'),
                             Bool('EXECUTE'),
                             Bool('DELETE_CHILD'),
                             Bool('READ_ATTRIBUTES'),
                             Bool('WRITE_ATTRIBUTES'),
                             Bool('DELETE'),
                             Bool('READ_ACL'),
                             Bool('WRITE_ACL'),
                             Bool('WRITE_OWNER'),
                             Bool('SYNCHRONIZE'),
                             Str('BASIC',
                                 enum=[
                                     'FULL_CONTROL', 'MODIFY', 'READ',
                                     'TRAVERSE'
                                 ]),
                         ),
                         Dict(
                             'flags',
                             Bool('FILE_INHERIT'),
                             Bool('DIRECTORY_INHERIT'),
                             Bool('NO_PROPAGATE_INHERIT'),
                             Bool('INHERIT_ONLY'),
                             Bool('INHERITED'),
                             Str('BASIC', enum=['INHERIT', 'NOINHERIT']),
                         ),
                     )
                 ],
                 default=[]),
            Dict('options', Bool('stripacl', default=False),
                 Bool('recursive', default=False),
                 Bool('traverse', default=False),
                 Bool('canonicalize', default=True))))
    @job(lock="perm_change")
    def setacl(self, job, data):
        """
        Set ACL of a given path. Takes the following parameters:
        `path` full path to directory or file.

        `dacl` "simplified" ACL here or a full ACL.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `recursive` apply the ACL recursively

        `traverse` traverse filesystem boundaries (ZFS datasets)

        `stripacl` convert the ACL to a trivial ACL. An ACL is trivial if it can be expressed as a file mode
        without losing any access rules.

        `canonicalize` reorder ACL entries so that they are in canonical form as described
        in the Microsoft documentation MS-DTYP 2.4.5 (ACL).

        In all cases we replace USER_OBJ, GROUP_OBJ, and EVERYONE with owner@, group@, everyone@ for
        consistency with getfacl and setfacl. If one of the aforementioned special tags is used, 'id' must
        be set to None.

        An inheriting empty everyone@ ACE is appended to non-trivial ACLs in order to enforce Windows
        expectations regarding permissions inheritance. This entry is removed from NT ACL returned
        to SMB clients when 'ixnas' samba VFS module is enabled.
        """
        job.set_progress(0, 'Preparing to set acl.')
        options = data['options']
        dacl = data.get('dacl', [])

        self._common_perm_path_validate(data['path'])

        if dacl and options['stripacl']:
            raise CallError(
                'Setting ACL and stripping ACL are not permitted simultaneously.',
                errno.EINVAL)

        uid = -1 if data.get('uid', None) is None else data['uid']
        gid = -1 if data.get('gid', None) is None else data['gid']
        if options['stripacl']:
            a = acl.ACL(file=data['path'])
            a.strip()
            a.apply(data['path'])
        else:
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                ace = {
                    'tag': acl.ACLWho(entry['tag']).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': self.__convert_to_adv_permset(entry['perms']['BASIC'])
                    if 'BASIC' in entry['perms'] else entry['perms'],
                    'flags': self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                    if 'BASIC' in entry['flags'] else entry['flags'],
                }
                if ace['flags'].get('INHERIT_ONLY') and not ace['flags'].get(
                        'DIRECTORY_INHERIT', False) and not ace['flags'].get(
                            'FILE_INHERIT', False):
                    raise CallError(
                        'Invalid flag combination. DIRECTORY_INHERIT or FILE_INHERIT must be set if INHERIT_ONLY is set.',
                        errno.EINVAL)
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                cleaned_acl.append(ace)
            if options['canonicalize']:
                cleaned_acl = self.canonicalize_acl_order(cleaned_acl)

            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(data['path'])

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished setting ACL.')
            return

        job.set_progress(10, f'Recursively setting ACL on {data["path"]}.')
        self._winacl(data['path'], 'clone', uid, gid, options)
        job.set_progress(100, 'Finished setting ACL.')
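
A minimal standalone sketch of the canonical ordering applied by canonicalize_acl_order above; the sample ACEs and the helper names below are illustrative only and not part of the middleware API.

# Illustrative sketch of canonical ACE ordering (not the middleware code itself).
# Non-inherited ACEs come first; within each group DENY precedes ALLOW and
# entries that apply only to the object precede inheritable entries.
def is_inheritable(flags):
    return any(flags.get(f) for f in (
        'FILE_INHERIT', 'DIRECTORY_INHERIT', 'NO_PROPAGATE_INHERIT', 'INHERIT_ONLY'))


def sort_canonically(dacl):
    inherited = [a for a in dacl if a['flags'].get('INHERITED')]
    direct = [a for a in dacl if not a['flags'].get('INHERITED')]
    key = lambda a: (a['type'] == 'ALLOW', is_inheritable(a['flags']))
    return sorted(direct, key=key) + sorted(inherited, key=key)


sample = [
    {'tag': 'everyone@', 'type': 'ALLOW', 'flags': {'FILE_INHERIT': True}},
    {'tag': 'owner@', 'type': 'DENY', 'flags': {}},
    {'tag': 'group@', 'type': 'ALLOW', 'flags': {'INHERITED': True}},
]
print([(a['type'], a['tag']) for a in sort_canonically(sample)])
# [('DENY', 'owner@'), ('ALLOW', 'everyone@'), ('ALLOW', 'group@')]
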
Example #3
class SharingAFPService(CRUDService):
    class Config:
        namespace = 'sharing.afp'
        datastore = 'sharing.afp_share'
        datastore_prefix = 'afp_'
        datastore_extend = 'sharing.afp.extend'

    @accepts(
        Dict('sharingafp_create',
             Str('path', required=True),
             Bool('home', default=False),
             Str('name'),
             Str('comment'),
             List('allow', default=[]),
             List('deny', default=[]),
             List('ro', default=[]),
             List('rw', default=[]),
             Bool('timemachine', default=False),
             Int('timemachine_quota', default=0),
             Bool('nodev', default=False),
             Bool('nostat', default=False),
             Bool('upriv', default=True),
             UnixPerm('fperm', default='644'),
             UnixPerm('dperm', default='755'),
             UnixPerm('umask', default='000'),
             List('hostsallow', items=[], default=[]),
             List('hostsdeny', items=[], default=[]),
             Str('vuid', null=True, default=''),
             Str('auxparams', max_length=None),
             Bool('enabled', default=True),
             register=True))
    async def do_create(self, data):
        """
        Create AFP share.

        `allow`, `deny`, `ro`, and `rw` are lists of users and groups. Groups are designated by
        an @ prefix.

        `hostsallow` and `hostsdeny` are lists of hosts and/or networks.
        """
        verrors = ValidationErrors()
        path = data['path']

        await self.clean(data, 'sharingafp_create', verrors)
        await self.validate(data, 'sharingafp_create', verrors)

        await check_path_resides_within_volume(verrors, self.middleware,
                                               'sharingafp_create.path', path)

        verrors.check()

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)

        await self._service_change('afp', 'reload')

        return data

    @accepts(Int('id'),
             Patch('sharingafp_create', 'sharingafp_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update AFP share `id`.
        """
        verrors = ValidationErrors()
        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })
        path = data.get('path')

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingafp_update', verrors, id=id)
        await self.validate(new, 'sharingafp_update', verrors, old=old)

        if path:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   'sharingafp_create.path',
                                                   path)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)

        await self._service_change('afp', 'reload')

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete AFP share `id`.
        """
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self._service_change('afp', 'reload')
        return result

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        await self.home_exists(data['home'], schema_name, verrors, old)
        if data['vuid']:
            try:
                uuid.UUID(data['vuid'], version=4)
            except ValueError:
                verrors.add(f'{schema_name}.vuid',
                            'vuid must be a valid UUID.')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        home = data['home']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if not name:
            if home:
                name = 'Homes'
            else:
                name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore, path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['allow'] = data['allow'].split()
        data['deny'] = data['deny'].split()
        data['ro'] = data['ro'].split()
        data['rw'] = data['rw'].split()
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['allow'] = ' '.join(data['allow'])
        data['deny'] = ' '.join(data['deny'])
        data['ro'] = ' '.join(data['ro'])
        data['rw'] = ' '.join(data['rw'])
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        if not data['vuid'] and data['timemachine']:
            data['vuid'] = str(uuid.uuid4())
        return data
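
The extend/compress pair above converts between the datastore's space-separated strings and the API's list form. A hedged standalone sketch of that round trip follows; the helper functions and sample row are illustrative, not middleware code.

# Illustrative round trip between datastore strings and API lists
# (mirrors the shape of sharing.afp extend/compress; not the middleware code itself).
LIST_FIELDS = ('allow', 'deny', 'ro', 'rw', 'hostsallow', 'hostsdeny')


def extend_row(row):
    return {k: (v.split() if k in LIST_FIELDS else v) for k, v in row.items()}


def compress_share(share):
    return {k: (' '.join(v) if k in LIST_FIELDS else v) for k, v in share.items()}


row = {'name': 'media', 'allow': 'alice @staff', 'deny': ''}
share = extend_row(row)  # {'name': 'media', 'allow': ['alice', '@staff'], 'deny': []}
assert compress_share(share) == {'name': 'media', 'allow': 'alice @staff', 'deny': ''}
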
Example #4
class SharingSMBService(CRUDService):
    class Config:
        namespace = 'sharing.smb'
        datastore = 'sharing.cifs_share'
        datastore_prefix = 'cifs_'
        datastore_extend = 'sharing.smb.extend'

    @accepts(
        Dict('sharingsmb_create',
             Str('path', required=True),
             Bool('home', default=False),
             Str('name'),
             Str('comment'),
             Bool('ro', default=False),
             Bool('browsable', default=True),
             Bool('recyclebin', default=False),
             Bool('showhiddenfiles', default=False),
             Bool('guestok', default=False),
             Bool('guestonly', default=False),
             Bool('abe', default=False),
             List('hostsallow', items=[IPAddr('ip', network=True)],
                  default=[]),
             List('hostsdeny', items=[IPAddr('ip', network=True)], default=[]),
             List('vfsobjects',
                  default=['zfs_space', 'zfsacl', 'streams_xattr']),
             Int('storage_task'),
             Str('auxsmbconf'),
             Bool('default_permissions'),
             register=True))
    async def do_create(self, data):
        verrors = ValidationErrors()
        path = data['path']

        default_perms = data.pop('default_permissions', True)

        await self.clean(data, 'sharingsmb_create', verrors)
        await self.validate(data, 'sharingsmb_create', verrors)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(data)
        await self.set_storage_tasks(data)
        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.extend(data)  # We should do this in the insert call ?

        await self._service_change('cifs', 'reload')
        await self.apply_default_perms(default_perms, path, data['home'])

        return data

    @accepts(Int('id'),
             Patch('sharingsmb_create', 'sharingsmb_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        verrors = ValidationErrors()
        path = data.get('path')
        default_perms = data.pop('default_permissions', False)

        old = await self.middleware.call(
            'datastore.query', self._config.datastore, [('id', '=', id)], {
                'extend': self._config.datastore_extend,
                'prefix': self._config.datastore_prefix,
                'get': True
            })

        new = old.copy()
        new.update(data)

        await self.clean(new, 'sharingsmb_update', verrors, id=id)
        await self.validate(new, 'sharingsmb_update', verrors, old=old)

        if verrors:
            raise verrors

        if path and not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as e:
                raise CallError(f'Failed to create {path}: {e}')

        await self.compress(new)
        await self.set_storage_tasks(new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.extend(new)  # same here ?

        await self._service_change('cifs', 'reload')
        await self.apply_default_perms(default_perms, path, new['home'])

        return new

    @accepts(Int('id'))
    async def do_delete(self, id):
        share = await self._get_instance(id)
        result = await self.middleware.call('datastore.delete',
                                            self._config.datastore, id)
        await self.middleware.call('notifier.sharesec_delete', share['name'])
        await self._service_change('cifs', 'reload')
        return result

    @private
    async def clean(self, data, schema_name, verrors, id=None):
        data['name'] = await self.name_exists(data, schema_name, verrors, id)

    @private
    async def validate(self, data, schema_name, verrors, old=None):
        home_result = await self.home_exists(data['home'], schema_name,
                                             verrors, old)

        if home_result:
            verrors.add(f'{schema_name}.home',
                        'Only one share is allowed to be a home share.')
        elif not home_result and not data['path']:
            verrors.add(f'{schema_name}.path', 'This field is required.')

        if data['path']:
            await check_path_resides_within_volume(verrors, self.middleware,
                                                   f"{schema_name}.path",
                                                   data['path'])

        if data.get('name') and data['name'] == 'global':
            verrors.add(
                f'{schema_name}.name',
                'Global is a reserved section name, please select another one')

    @private
    async def home_exists(self, home, schema_name, verrors, old=None):
        home_filters = [('home', '=', True)]
        home_result = None

        if home:
            if old and old['id'] is not None:
                id = old['id']

                if not old['home']:
                    home_filters.append(('id', '!=', id))
                    # The user already had this set as the home share
                    home_result = await self.middleware.call(
                        'datastore.query', self._config.datastore,
                        home_filters,
                        {'prefix': self._config.datastore_prefix})

        return home_result

    @private
    async def name_exists(self, data, schema_name, verrors, id=None):
        name = data['name']
        path = data['path']
        name_filters = [('name', '=', name)]
        path_filters = [('path', '=', path)]

        if path and not name:
            name = path.rsplit('/', 1)[-1]

        if id is not None:
            name_filters.append(('id', '!=', id))
            path_filters.append(('id', '!=', id))

        name_result = await self.middleware.call(
            'datastore.query', self._config.datastore, name_filters,
            {'prefix': self._config.datastore_prefix})
        path_result = await self.middleware.call(
            'datastore.query', self._config.datastore, path_filters,
            {'prefix': self._config.datastore_prefix})

        if name_result:
            verrors.add(f'{schema_name}.name',
                        'A share with this name already exists.')

        if path_result:
            verrors.add(f'{schema_name}.path',
                        'A share with this path already exists.')

        return name

    @private
    async def extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()

        return data

    @private
    async def compress(self, data):
        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])

        return data

    @private
    async def apply_default_perms(self, default_perms, path, is_home):
        if default_perms:
            try:
                (owner,
                 group) = await self.middleware.call('notifier.mp_get_owner',
                                                     path)
            except Exception:
                (owner, group) = ('root', 'wheel')

            await self.middleware.call('notifier.winacl_reset', path, owner,
                                       group, None, not is_home)

    @accepts(Str('path', required=True))
    async def get_storage_tasks(self, path):
        zfs_datasets = await self.middleware.call(
            'zfs.dataset.query', [('type', '=', 'FILESYSTEM')])
        task_list = []
        task_dict = {}

        for ds in zfs_datasets:
            tasks = []
            name = ds['name']
            mountpoint = ds['properties']['mountpoint']['parsed']

            if path == mountpoint:
                tasks = await self.middleware.call(
                    'datastore.query', 'storage.task',
                    [['task_filesystem', '=', name]])
            elif path.startswith(f'{mountpoint}/'):
                tasks = await self.middleware.call(
                    'datastore.query', 'storage.task',
                    [['task_filesystem', '=', name],
                     ['task_recursive', '=', 'True']])

            task_list.extend(tasks)

        for task in task_list:
            task_id = task['id']
            fs = task['task_filesystem']
            retcount = task['task_ret_count']
            retunit = task['task_ret_unit']
            _interval = task['task_interval']
            interval = dict(await self.middleware.call(
                'notifier.choices', 'TASK_INTERVAL'))[_interval]

            msg = f'{fs} - every {interval} - {retcount}{retunit}'

            task_dict[task_id] = msg

        return task_dict

    @private
    async def set_storage_tasks(self, data):
        task = data.get('storage_task', None)
        home = data['home']
        path = data['path']
        task_list = []

        if not task:
            if path:
                task_list = await self.get_storage_tasks(path=path)
            elif home:
                task_list = await self.get_storage_tasks(home=home)

        if task_list:
            data['storage_task'] = list(task_list.keys())[0]

        return data

    @accepts()
    def vfsobjects_choices(self):
        vfs_modules_path = '/usr/local/lib/shared-modules/vfs'
        vfs_modules = []
        vfs_exclude = {'shadow_copy2', 'recycle', 'aio_pthread'}

        if os.path.exists(vfs_modules_path):
            vfs_modules.extend(
                filter(
                    lambda m: m not in vfs_exclude,
                    map(lambda f: f.rpartition('.')[0],
                        os.listdir(vfs_modules_path))))
        else:
            vfs_modules.extend(['streams_xattr'])

        return vfs_modules
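
vfsobjects_choices above derives the selectable VFS objects from module file names on disk. A small illustrative sketch of the same filename-to-choice filtering, using a hard-coded listing instead of os.listdir:

# Illustrative: turn VFS module file names into choices while dropping excluded
# modules (same filtering as vfsobjects_choices; the listing is hard-coded here).
listing = ['zfsacl.so', 'streams_xattr.so', 'recycle.so', 'shadow_copy2.so']
exclude = {'shadow_copy2', 'recycle', 'aio_pthread'}

choices = [name.rpartition('.')[0] for name in listing
           if name.rpartition('.')[0] not in exclude]
print(choices)  # ['zfsacl', 'streams_xattr']
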
Example #5
class UPSService(SystemServiceService):
    try:
        DRIVERS_AVAILABLE = set(os.listdir(DRIVER_BIN_DIR))
    except FileNotFoundError:
        DRIVERS_AVAILABLE = set()

    class Config:
        datastore = 'services.ups'
        datastore_prefix = 'ups_'
        datastore_extend = 'ups.ups_config_extend'
        service = 'ups'
        service_verb = 'restart'

    @private
    async def ups_config_extend(self, data):
        data['mode'] = data['mode'].upper()
        data['shutdown'] = data['shutdown'].upper()
        data['toemail'] = [v for v in data['toemail'].split(';') if v]
        host = 'localhost' if data['mode'] == 'MASTER' else data['remotehost']
        data['complete_identifier'] = f'{data["identifier"]}@{host}:{data["remoteport"]}'
        return data

    @accepts()
    async def port_choices(self):
        ports = [x for x in glob.glob('/dev/cua*') if x.find('.') == -1]
        ports.extend(glob.glob('/dev/ugen*'))
        ports.extend(glob.glob('/dev/uhid*'))
        ports.append('auto')
        return ports

    @accepts()
    def driver_choices(self):
        """
        Returns choices of UPS drivers supported by the system.
        """
        ups_choices = {}
        if osc.IS_LINUX:
            driver_list = '/usr/share/nut/driver.list'
        else:
            driver_list = '/conf/base/etc/local/nut/driver.list'
        if os.path.exists(driver_list):
            with open(driver_list, 'rb') as f:
                d = f.read().decode('utf8', 'ignore')
            r = io.StringIO()
            for line in re.sub(r'[ \t]+', ' ', d, flags=re.M).split('\n'):
                r.write(line.strip() + '\n')
            r.seek(0)
            reader = csv.reader(r, delimiter=' ', quotechar='"')
            for row in reader:
                if len(row) == 0 or row[0].startswith('#'):
                    continue
                if row[-2] == '#':
                    last = -3
                else:
                    last = -1
                driver_str = row[last]
                driver_annotation = ''
                m = re.match(r'(.+) \((.+)\)', driver_str)  # "blazer_usb (USB ID 0665:5161)"
                if m:
                    driver_str, driver_annotation = m.group(1), m.group(2)
                for driver in driver_str.split(' or '):  # can be "blazer_ser or blazer_usb"
                    driver = driver.strip()
                    if driver not in self.DRIVERS_AVAILABLE:
                        continue
                    for i, field in enumerate(list(row)):
                        row[i] = field
                    ups_choices['$'.join([driver, row[3]])] = '%s (%s)' % (
                        ' '.join(filter(None, row[0:last])),
                        ', '.join(filter(None, [driver, driver_annotation]))
                    )
        return ups_choices

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        driver = data.get('driver')
        if driver:
            if driver not in (await self.middleware.call('ups.driver_choices')).keys():
                verrors.add(
                    f'{schema}.driver',
                    'Driver selected does not match local machine\'s driver list'
                )

        identifier = data['identifier']
        if identifier:
            if not re.search(r'^[a-z0-9\.\-_]+$', identifier, re.I):
                verrors.add(
                    f'{schema}.identifier',
                    'Use alphanumeric characters, ".", "-" and "_"'
                )

        for field in ['monpwd', 'monuser']:
            if not data.get(field):
                verrors.add(f'{schema}.{field}', 'This field is required.')
            elif re.search(r'[ #]', data[field], re.I):
                verrors.add(f'{schema}.{field}', 'Spaces or number signs are not allowed.')

        mode = data.get('mode')
        if mode == 'MASTER':
            for field in filter(
                lambda f: not data[f],
                ['port', 'driver']
            ):
                verrors.add(
                    f'{schema}.{field}',
                    'This field is required'
                )
        else:
            if not data.get('remotehost'):
                verrors.add(
                    f'{schema}.remotehost',
                    'This field is required'
                )

        to_emails = data.get('toemail')
        if to_emails:
            data['toemail'] = ';'.join(to_emails)
        else:
            data['toemail'] = ''

        data['mode'] = data['mode'].lower()
        data['shutdown'] = data['shutdown'].lower()

        return verrors, data

    @accepts(
        Dict(
            'ups_update',
            Bool('emailnotify'),
            Bool('powerdown'),
            Bool('rmonitor'),
            Int('nocommwarntime', null=True),
            Int('remoteport', validators=[Port()]),
            Int('shutdowntimer'),
            Int('hostsync', validators=[Range(min=0)]),
            Str('description'),
            Str('driver'),
            Str('extrausers', max_length=None),
            Str('identifier', empty=False),
            Str('mode', enum=['MASTER', 'SLAVE']),
            Str('monpwd', empty=False),
            Str('monuser', empty=False),
            Str('options', max_length=None),
            Str('optionsupsd', max_length=None),
            Str('port'),
            Str('remotehost'),
            Str('shutdown', enum=['LOWBATT', 'BATT']),
            Str('shutdowncmd', null=True),
            Str('subject'),
            List('toemail', items=[Str('email', validators=[Email()])]),
            update=True
        )
    )
    async def do_update(self, data):
        """
        Update UPS Service Configuration.

        `emailnotify` when enabled, sends out notifications of different UPS events via email.

        `powerdown` when enabled, sets UPS to power off after shutting down the system.

        `nocommwarntime` is a value in seconds; the UPS service waits this long before alerting that it
        cannot reach the configured UPS.

        `shutdowntimer` is a value in seconds which tells the service to wait the specified number of seconds
        for the UPS before initiating a shutdown. This only applies when `shutdown` is set to "BATT".

        `shutdowncmd` is the command which is executed to initiate a shutdown. It defaults to "poweroff".

        `toemail` is a list of valid email addresses to which notification emails are sent.
        """
        config = await self.config()
        config.pop('complete_identifier')
        old_config = config.copy()
        config.update(data)
        verrors, config = await self.validate_data(config, 'ups_update')
        if verrors:
            raise verrors

        old_config['mode'] = old_config['mode'].lower()
        old_config['shutdown'] = old_config['shutdown'].lower()
        old_config['toemail'] = ';'.join(old_config['toemail']) if old_config['toemail'] else ''

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            if config['identifier'] != old_config['identifier']:
                await self.dismiss_alerts()

            await self._update_service(old_config, config)

        return await self.config()

    @private
    async def alerts_mapping(self):
        return {
            'LOWBATT': 'UPSBatteryLow',
            'COMMBAD': 'UPSCommbad',
            'COMMOK': 'UPSCommok',
            'ONBATT': 'UPSOnBattery',
            'ONLINE': 'UPSOnline',
            'REPLBATT': 'UPSReplbatt'
        }

    @private
    async def dismiss_alerts(self):
        config = await self.config()

        for alert in (await self.alerts_mapping()).values():
            await self.middleware.call('alert.oneshot_delete', alert, {'ups': config['identifier']})

    @private
    @accepts(
        Str('notify_type')
    )
    async def upssched_event(self, notify_type):
        config = await self.config()
        upsc_identifier = config['complete_identifier']
        cp = await run('upsc', upsc_identifier, check=False)
        if cp.returncode:
            stats_output = ''
            self.logger.error('Failed to retrieve ups information: %s', cp.stderr.decode())
        else:
            stats_output = cp.stdout.decode()

        if RE_TEST_IN_PROGRESS.search(stats_output):
            self.logger.debug('Self test is in progress and %r notify event should be ignored', notify_type)
            return

        if notify_type.lower() == 'shutdown':
            # Before we start FSD with upsmon, let's ensure that ups is not ONLINE (OL).
            # There are cases where battery/charger issues can result in ups.status being "OL LB" at the
            # same time. This will ensure that we don't initiate a shutdown if ups is OL.
            ups_status = RE_UPS_STATUS.findall(stats_output)
            if ups_status and 'ol' in ups_status[0].lower():
                self.middleware.logger.debug(
                    f'Shutdown not initiated as ups.status ({ups_status[0]}) indicates '
                    f'{config["identifier"]} is ONLINE (OL).'
                )
            else:
                syslog.syslog(syslog.LOG_NOTICE, 'upssched-cmd "issuing shutdown"')
                await run('upsmon', '-c', 'fsd', check=False)
        elif 'notify' in notify_type.lower():
            # notify_type is expected to be of the following format
            # NOTIFY-EVENT, e.g. NOTIFY-LOWBATT
            notify_type = notify_type.split('-')[-1]

            # We would like to send alerts for the following events
            alert_mapping = await self.alerts_mapping()

            await self.dismiss_alerts()

            if notify_type in alert_mapping:
                await self.middleware.call(
                    'alert.oneshot_create', alert_mapping[notify_type], {'ups': config['identifier']}
                )

            if config['emailnotify']:
                # Email user with the notification event and details
                # We send the email in the following format (including line breaks)

                # NOTIFICATION: 'LOWBATT'
                # UPS: 'ups'
                #
                # Statistics recovered:
                #
                # 1) Battery charge (percent)
                # battery.charge: 5
                #
                # 2) Remaining battery level when UPS switches to LB (percent)
                # battery.charge.low: 10
                #
                # 3) Battery runtime (seconds)
                # battery.runtime: 1860
                #
                # 4) Remaining battery runtime when UPS switches to LB (seconds)
                # battery.runtime.low: 900

                ups_name = config['identifier']
                hostname = socket.gethostname()
                current_time = datetime.datetime.now(tz=dateutil.tz.tzlocal()).strftime('%a %b %d %H:%M:%S %Z %Y')
                ups_subject = config['subject'].replace('%d', current_time).replace('%h', hostname)
                body = f'NOTIFICATION: {notify_type!r}\n\nUPS: {ups_name!r}\n\n'

                # Let's gather following stats
                data_points = {
                    'battery.charge': 'Battery charge (percent)',
                    'battery.charge.low': 'Battery level remaining (percent) when UPS switches to Low Battery (LB)',
                    'battery.charge.status': 'Battery charge status',
                    'battery.runtime': 'Battery runtime (seconds)',
                    'battery.runtime.low': 'Battery runtime remaining (seconds) when UPS switches to Low Battery (LB)',
                    'battery.runtime.restart': 'Minimum battery runtime (seconds) to allow UPS restart after power-off',
                }

                stats_output = (
                    await run('upsc', upsc_identifier, check=False)
                ).stdout
                recovered_stats = re.findall(
                    fr'({"|".join(data_points)}): (.*)',
                    '' if not stats_output else stats_output.decode()
                )

                if recovered_stats:
                    body += 'Statistics recovered:\n\n'
                    # recovered_stats is expected to be a list in this format
                    # [('battery.charge', '5'), ('battery.charge.low', '10'), ('battery.runtime', '1860')]
                    for index, stat in enumerate(recovered_stats):
                        body += f'{index + 1}) {data_points[stat[0]]}\n  {stat[0]}: {stat[1]}\n\n'

                else:
                    body += 'Statistics could not be recovered\n'

                # Subject and body defined, send email
                job = await self.middleware.call(
                    'mail.send', {
                        'subject': ups_subject,
                        'text': body,
                        'to': config['toemail']
                    }
                )

                await job.wait()
                if job.error:
                    self.middleware.logger.debug(f'Failed to send UPS status email: {job.error}')

        else:
            self.middleware.logger.debug(f'Unrecognized UPS notification event: {notify_type}')
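
upssched_event above recovers selected statistics from `upsc` output with a single regular expression built from the data_points keys. A standalone sketch of that extraction against canned output; the sample output text is fabricated for illustration.

import re

# Illustrative parse of upsc output for the statistics used in the notification
# email (same regex construction as upssched_event; the output is sample data).
data_points = {
    'battery.charge': 'Battery charge (percent)',
    'battery.runtime': 'Battery runtime (seconds)',
}
stats_output = 'battery.charge: 42\nbattery.runtime: 1200\nups.status: OB\n'

recovered = re.findall(fr'({"|".join(data_points)}): (.*)', stats_output)
print(recovered)  # [('battery.charge', '42'), ('battery.runtime', '1200')]
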
Example #6
class RsyncTaskService(TaskPathService):

    share_task_type = 'Rsync'

    class Config:
        datastore = 'tasks.rsync'
        datastore_prefix = 'rsync_'
        datastore_extend = 'rsynctask.rsync_task_extend'
        datastore_extend_context = 'rsynctask.rsync_task_extend_context'

    @private
    async def rsync_task_extend(self, data, context):
        data['extra'] = shlex.split(data['extra'].replace('"', r'"\"').replace(
            "'", r'"\"'))
        for field in ('mode', 'direction'):
            data[field] = data[field].upper()
        Cron.convert_db_format_to_schedule(data)
        data['job'] = context['jobs'].get(data['id'])
        return data

    @private
    async def rsync_task_extend_context(self, extra):
        jobs = {}
        for j in await self.middleware.call("core.get_jobs",
                                            [("method", "=", "rsynctask.run")],
                                            {"order_by": ["id"]}):
            try:
                task_id = int(j["arguments"][0])
            except (IndexError, ValueError):
                continue

            if task_id in jobs and jobs[task_id]["state"] == "RUNNING":
                continue

            jobs[task_id] = j

        return {
            "jobs": jobs,
        }

    @private
    async def validate_rsync_task(self, data, schema):
        verrors = ValidationErrors()

        # Windows users can have spaces in their usernames
        # http://www.freebsd.org/cgi/query-pr.cgi?pr=164808

        username = data.get('user')
        if ' ' in username:
            verrors.add(f'{schema}.user', 'User names cannot have spaces')
            raise verrors

        user = None
        with contextlib.suppress(KeyError):
            user = await self.middleware.call('dscache.get_uncached_user',
                                              username)

        if not user:
            verrors.add(f'{schema}.user',
                        f'Provided user "{username}" does not exist')
            raise verrors

        await self.validate_path_field(data, schema, verrors)

        remote_host = data.get('remotehost')
        if not remote_host:
            verrors.add(f'{schema}.remotehost', 'Please specify a remote host')

        if data.get('extra'):
            data['extra'] = ' '.join(data['extra'])
        else:
            data['extra'] = ''

        mode = data.get('mode')
        if not mode:
            verrors.add(f'{schema}.mode', 'This field is required')

        remote_module = data.get('remotemodule')
        if mode == 'MODULE' and not remote_module:
            verrors.add(f'{schema}.remotemodule', 'This field is required')

        if mode == 'SSH':
            remote_port = data.get('remoteport')
            if not remote_port:
                verrors.add(f'{schema}.remoteport', 'This field is required')

            remote_path = data.get('remotepath')
            if not remote_path:
                verrors.add(f'{schema}.remotepath', 'This field is required')

            search = os.path.join(user['pw_dir'], '.ssh', 'id_[edr]*')
            exclude_from_search = os.path.join(user['pw_dir'], '.ssh',
                                               'id_[edr]*pub')
            key_files = set(glob.glob(search)) - set(
                glob.glob(exclude_from_search))
            if not key_files:
                verrors.add(
                    f'{schema}.user',
                    'In order to use rsync over SSH you need a user'
                    ' with a private key (DSA/ECDSA/RSA) set up in home dir.')
            else:
                for file in glob.glob(search):
                    if '.pub' not in file:
                        # file holds a private key and its permissions should be 600
                        if os.stat(file).st_mode & 0o077 != 0:
                            verrors.add(
                                f'{schema}.user',
                                f'Permissions {oct(os.stat(file).st_mode & 0o777)} for {file} are too open. Please '
                                f'correct them by running chmod 600 {file}')

            if (data['enabled'] and data['validate_rpath'] and remote_path
                    and remote_host and remote_port):
                if '@' in remote_host:
                    remote_username, remote_host = remote_host.rsplit('@', 1)
                else:
                    remote_username = username

                try:
                    async with await asyncio.wait_for(
                            asyncssh.connect(remote_host,
                                             port=remote_port,
                                             username=remote_username,
                                             client_keys=key_files,
                                             known_hosts=None),
                            timeout=5,
                    ) as conn:
                        await conn.run(f'test -d {shlex.quote(remote_path)}',
                                       check=True)
                except asyncio.TimeoutError:

                    verrors.add(
                        f'{schema}.remotehost',
                        'SSH timeout occurred. Remote path cannot be validated.'
                    )

                except OSError as e:

                    if e.errno == 113:
                        verrors.add(
                            f'{schema}.remotehost',
                            f'Connection to the remote host {remote_host} on port {remote_port} failed.'
                        )
                    else:
                        verrors.add(f'{schema}.remotehost', e.__str__())

                except asyncssh.DisconnectError as e:

                    verrors.add(
                        f'{schema}.remotehost',
                        f'Disconnect Error[ error code {e.code} ] was generated when trying to '
                        f'communicate with remote host {remote_host} and remote user {remote_username}.'
                    )

                except asyncssh.ProcessError as e:

                    if e.code == '1':
                        verrors.add(
                            f'{schema}.remotepath',
                            'The Remote Path you specified does not exist or is not a directory. '
                            'Either create one yourself on the remote machine or uncheck the '
                            'validate_rpath field')
                    else:
                        verrors.add(
                            f'{schema}.remotepath',
                            f'Connection to Remote Host was successful but failed to verify '
                            f'Remote Path. {e.__str__()}')

                except asyncssh.Error as e:

                    if e.__class__.__name__ in str(e):
                        exception_reason = str(e)
                    else:
                        exception_reason = f'{e.__class__.__name__} {e}'
                    verrors.add(
                        f'{schema}.remotepath',
                        f'Remote Path could not be validated. An exception was raised. {exception_reason}'
                    )
            elif data['enabled'] and data['validate_rpath']:
                verrors.add(
                    f'{schema}.remotepath',
                    'Remote path could not be validated because of missing fields'
                )

        data.pop('validate_rpath', None)

        # Keeping compatibility with legacy UI
        for field in ('mode', 'direction'):
            data[field] = data[field].lower()

        return verrors, data

    @accepts(
        Dict(
            'rsync_task_create',
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('user', required=True),
            Str('remotehost'),
            Int('remoteport'),
            Str('mode', enum=['MODULE', 'SSH'], default='MODULE'),
            Str('remotemodule'),
            Str('remotepath'),
            Bool('validate_rpath', default=True),
            Str('direction', enum=['PULL', 'PUSH'], default='PUSH'),
            Str('desc'),
            Cron(
                'schedule',
                defaults={'minute': '00'},
            ),
            Bool('recursive'),
            Bool('times'),
            Bool('compress'),
            Bool('archive'),
            Bool('delete'),
            Bool('quiet'),
            Bool('preserveperm'),
            Bool('preserveattr'),
            Bool('delayupdates'),
            List('extra', items=[Str('extra')]),
            Bool('enabled', default=True),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsync Task.

        See the comment in Rsyncmod about `path` length limits.

        `remotehost` is the IP address or hostname of the remote system. If the username differs on the remote host,
        the "username@remote_host" format should be used.

        `mode` represents different operating mechanisms for Rsync, i.e. Rsync Module mode / Rsync SSH mode.

        `remotemodule` is the name of the remote module; this attribute should be specified when `mode` is set to MODULE.

        `remotepath` specifies the path on the remote system.

        `validate_rpath` is a boolean which, when set, validates the existence of the remote path.

        `direction` specifies if data should be PULLED or PUSHED from the remote system.

        `compress` when set reduces the size of the data which is to be transmitted.

        `archive` when set makes rsync run recursively, preserving symlinks, permissions, modification times, group,
        and special files.

        `delete` when set deletes files in the destination directory which do not exist in the source directory.

        `preserveperm` when set preserves original file permissions.

        .. examples(websocket)::

          Create a Rsync Task which pulls data from a remote system every 5 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "rsynctask.create",
                "params": [{
                    "enabled": true,
                    "schedule": {
                        "minute": "5",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "desc": "Test rsync task",
                    "user": "******",
                    "mode": "MODULE",
                    "remotehost": "[email protected]",
                    "compress": true,
                    "archive": true,
                    "direction": "PULL",
                    "path": "/mnt/vol1/rsync_dataset",
                    "remotemodule": "remote_module1"
                }]
            }
        """
        verrors, data = await self.validate_rsync_task(data,
                                                       'rsync_task_create')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(data['id'])

    @accepts(Int('id', validators=[Range(min=1)]),
             Patch('rsync_task_create', 'rsync_task_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsync Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        old.pop('job')

        new = old.copy()
        new.update(data)

        verrors, data = await self.validate_rsync_task(new,
                                                       'rsync_task_update')
        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)
        new.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        await self.middleware.call('service.restart', 'cron')

        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsync Task of `id`.
        """
        res = await self.middleware.call('datastore.delete',
                                         self._config.datastore, id)
        await self.middleware.call('service.restart', 'cron')
        return res

    @private
    async def commandline(self, id):
        """
        Helper method to generate the rsync command avoiding code duplication.
        """
        rsync = await self._get_instance(id)
        path = shlex.quote(rsync['path'])

        line = ['/usr/local/bin/rsync']
        for name, flag in (
            ('archive', '-a'),
            ('compress', '-z'),
            ('delayupdates', '--delay-updates'),
            ('delete', '--delete-delay'),
            ('preserveattr', '-X'),
            ('preserveperm', '-p'),
            ('recursive', '-r'),
            ('times', '-t'),
        ):
            if rsync[name]:
                line.append(flag)
        if rsync['extra']:
            line.append(' '.join(rsync['extra']))

        # Do not use username if one is specified in host field
        # See #5096 for more details
        if '@' in rsync['remotehost']:
            remote = rsync['remotehost']
        else:
            remote = f'"{rsync["user"]}"@{rsync["remotehost"]}'

        if rsync['mode'] == 'MODULE':
            module_args = [path, f'{remote}::"{rsync["remotemodule"]}"']
            if rsync['direction'] != 'PUSH':
                module_args.reverse()
            line += module_args
        else:
            line += [
                '-e',
                f'"ssh -p {rsync["remoteport"]} -o BatchMode=yes -o StrictHostKeyChecking=yes"'
            ]
            path_args = [
                path, f'{remote}:"{shlex.quote(rsync["remotepath"])}"'
            ]
            if rsync['direction'] != 'PUSH':
                path_args.reverse()
            line += path_args

        if rsync['quiet']:
            line += ['>', '/dev/null', '2>&1']

        return ' '.join(line)

    @item_method
    @accepts(Int('id'))
    @job(lock=lambda args: args[-1], logs=True)
    def run(self, job, id):
        """
        Job to run rsync task of `id`.

        Output is saved to job log excerpt (not syslog).
        """
        rsync = self.middleware.call_sync('rsynctask.get_instance', id)
        if rsync['locked']:
            self.middleware.call_sync('rsynctask.generate_locked_alert', id)
            return

        commandline = self.middleware.call_sync('rsynctask.commandline', id)

        cp = run_command_with_user_context(commandline, rsync['user'],
                                           lambda v: job.logs_fd.write(v))

        for klass in ('RsyncSuccess',
                      'RsyncFailed') if not rsync['quiet'] else ():
            self.middleware.call_sync('alert.oneshot_delete', klass,
                                      rsync['id'])

        if cp.returncode not in RsyncReturnCode.nonfatals():
            if not rsync['quiet']:
                self.middleware.call_sync(
                    'alert.oneshot_create', 'RsyncFailed', {
                        'id': rsync['id'],
                        'direction': rsync['direction'],
                        'path': rsync['path'],
                    })

            raise CallError(
                f'rsync command returned {cp.returncode}. Check logs for further information.'
            )
        elif not rsync['quiet']:
            self.middleware.call_sync(
                'alert.oneshot_create', 'RsyncSuccess', {
                    'id': rsync['id'],
                    'direction': rsync['direction'],
                    'path': rsync['path'],
                })
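
commandline above maps boolean task fields to rsync flags before joining everything into a shell command line. A reduced sketch of that flag mapping; the task dict is sample data, not a real datastore row.

import shlex

# Illustrative flag assembly mirroring rsynctask.commandline (sample task data).
FLAGS = (
    ('archive', '-a'),
    ('compress', '-z'),
    ('delete', '--delete-delay'),
    ('recursive', '-r'),
    ('times', '-t'),
)

task = {'archive': True, 'compress': True, 'delete': False,
        'recursive': True, 'times': True, 'path': '/mnt/tank/data'}

line = ['rsync'] + [flag for name, flag in FLAGS if task[name]]
line.append(shlex.quote(task['path']))
print(' '.join(line))  # rsync -a -z -r -t /mnt/tank/data
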
Example #7
class TrueNASService(Service):

    class Config:
        cli_namespace = "system.truenas"

    @accepts()
    @cli_private
    async def get_chassis_hardware(self):
        """
        Returns what type of hardware this is, detected from dmidecode.

        TRUENAS-X10-HA-D
        TRUENAS-X10-S
        TRUENAS-X20-HA-D
        TRUENAS-X20-S
        TRUENAS-M40-HA
        TRUENAS-M40-S
        TRUENAS-M50-HA
        TRUENAS-M50-S
        TRUENAS-M60-HA
        TRUENAS-M60-S
        TRUENAS-Z20-S
        TRUENAS-Z20-HA-D
        TRUENAS-Z30-HA-D
        TRUENAS-Z30-S
        TRUENAS-Z35-HA-D
        TRUENAS-Z35-S
        TRUENAS-Z50-HA-D
        TRUENAS-Z50-S

        Nothing in dmidecode but an M, X or Z class machine:
        (Note this means production didn't burn the hardware model
        into SMBIOS. We can detect this case by looking at the
        motherboard)
        TRUENAS-M
        TRUENAS-X
        TRUENAS-Z

        Really NFI about hardware at this point.  TrueNAS on a Dell?
        TRUENAS-UNKNOWN
        """

        data = await self.middleware.call('system.dmidecode_info')
        chassis = data['system-product-name']
        if chassis.startswith(('TRUENAS-M', 'TRUENAS-X', 'TRUENAS-Z')):
            return chassis
        # We don't match a burned in name for an M, X or Z series.  Let's catch
        # the case where we are an M, X or Z. (shame on you production!)
        motherboard = data['baseboard-manufacturer']
        motherboard_model = data['baseboard-product-name']
        if motherboard_model == 'X11DPi-NT' or motherboard_model == 'X11SPi-TF':
            return 'TRUENAS-M'
        if motherboard_model == 'iXsystems TrueNAS X10':
            return 'TRUENAS-X'
        if motherboard == 'GIGABYTE':
            return 'TRUENAS-Z'

        # Give up
        return 'TRUENAS-UNKNOWN'

    @accepts()
    @cli_private
    def get_eula(self):
        """
        Returns the TrueNAS End-User License Agreement (EULA).
        """
        if not os.path.exists(EULA_FILE):
            return
        with open(EULA_FILE, 'r', encoding='utf8') as f:
            return f.read()

    @accepts()
    @cli_private
    async def is_eula_accepted(self):
        """
        Returns whether the EULA is accepted or not.
        """
        return not os.path.exists(EULA_PENDING_PATH)

    @accepts()
    async def accept_eula(self):
        """
        Accept TrueNAS EULA.
        """
        try:
            os.unlink(EULA_PENDING_PATH)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    @private
    async def unaccept_eula(self):
        with open(EULA_PENDING_PATH, "w"):
            pass

    @accepts()
    async def get_customer_information(self):
        """
        Returns stored customer information.
        """
        result = await self.__fetch_customer_information()
        return result

    @accepts(Dict(
        'customer_information_update',
        Str('company'),
        Dict('administrative_user', *user_attrs),
        Dict('technical_user', *user_attrs),
        Dict(
            'reseller',
            Str('company'),
            Str('first_name'),
            Str('last_name'),
            Str('title'),
            Str('office_phone'),
            Str('mobile_phone'),
        ),
        Dict(
            'physical_location',
            Str('address'),
            Str('city'),
            Str('state'),
            Str('zip'),
            Str('country'),
            Str('contact_name'),
            Str('contact_phone_number'),
            Str('contact_email'),
        ),
        Str('primary_use_case'),
        Str('other_primary_use_case'),
    ))
    async def update_customer_information(self, data):
        """
        Updates customer information.
        """
        customer_information = await self.__fetch_customer_information()

        await self.middleware.call('datastore.update', 'truenas.customerinformation', customer_information["id"], {
            "data": json.dumps(data),
            "updated_at": datetime.utcnow(),
        })

        return customer_information

    async def __fetch_customer_information(self):
        result = await self.middleware.call('datastore.config', 'truenas.customerinformation')
        result["immutable_data"] = await self.__fetch_customer_information_immutable_data()
        result["data"] = json.loads(result["data"])
        result["needs_update"] = datetime.utcnow() - result["updated_at"] > timedelta(days=365)
        return result

    async def __fetch_customer_information_immutable_data(self):
        license = (await self.middleware.call('system.info'))['license']
        if license is None:
            return None

        return {
            "serial_number": license['system_serial'],
            "serial_number_ha": license['system_serial_ha'],
            "support_level": license['contract_type'].title(),
            "support_start_date": license['contract_start'].isoformat(),
            "support_end_date": license['contract_end'].isoformat(),
        }

    @accepts()
    async def is_production(self):
        """
        Returns if system is marked as production.
        """
        return await self.middleware.call('keyvalue.get', 'truenas:production', False)

    @accepts(Bool('production'), Bool('attach_debug', default=False))
    @job()
    async def set_production(self, job, production, attach_debug):
        """
        Sets system production state and optionally sends initial debug.
        """
        was_production = await self.is_production()
        await self.middleware.call('keyvalue.set', 'truenas:production', production)

        if not was_production and production:
            serial = (await self.middleware.call('system.info'))["system_serial"]
            return await job.wrap(await self.middleware.call('support.new_ticket', {
                "title": f"System has been just put into production ({serial})",
                "body": "This system has been just put into production",
                "attach_debug": attach_debug,
                "category": "Installation/Setup",
                "criticality": "Inquiry",
                "environment": "Production",
                "name": "Automatic Alert",
                "email": "*****@*****.**",
                "phone": "-",
            }))
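
A minimal usage sketch for the service above through the middleware client (an assumption for illustration: a local
middleware socket is available and `Client`/`call` from `middlewared.client` are used; the returned value is only an
example):

from middlewared.client import Client

with Client() as c:
    print(c.call('truenas.get_chassis_hardware'))   # e.g. 'TRUENAS-M50-HA'
    if not c.call('truenas.is_eula_accepted'):
        c.call('truenas.accept_eula')
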
Example #8
class KubernetesService(ConfigService):
    class Config:
        datastore = 'services.kubernetes'
        datastore_extend = 'kubernetes.k8s_extend'

    @private
    async def k8s_extend(self, data):
        data['dataset'] = os.path.join(
            data['pool'], 'ix-applications') if data['pool'] else None
        data.pop('cni_config')
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        if data['pool'] and not await self.middleware.call(
                'pool.query', [['name', '=', data['pool']]]):
            verrors.add(
                f'{schema}.pool',
                'Please provide a valid pool configured in the system.')

        if ipaddress.ip_address(
                data['cluster_dns_ip']) not in ipaddress.ip_network(
                    data['service_cidr']):
            verrors.add(f'{schema}.cluster_dns_ip',
                        'Must be in range of "service_cidr".')

        if data['node_ip'] not in await self.bindip_choices():
            verrors.add(f'{schema}.node_ip',
                        'Please provide a valid IP address.')

        for k, _ in await self.validate_interfaces(data):
            verrors.add(f'{schema}.{k}', 'Please specify a valid interface.')

        for k in ('route_v4', 'route_v6'):
            gateway = data[f'{k}_gateway']
            interface = data[f'{k}_interface']
            if (not gateway and not interface) or (gateway and interface):
                continue
            for k2 in ('gateway', 'interface'):
                verrors.add(
                    f'{schema}.{k}_{k2}',
                    f'{k}_gateway and {k}_interface must be specified together.'
                )

        verrors.check()

    @private
    async def validate_interfaces(self, data):
        errors = []
        interfaces = {
            i['name']: i
            for i in await self.middleware.call('interface.query')
        }
        for k in filter(lambda k: data[k] and data[k] not in interfaces,
                        ('route_v4_interface', 'route_v6_interface')):
            errors.append((k, data[k]))
        return errors

    @accepts(
        Dict(
            'kubernetes_update',
            Str('pool', empty=False, null=True),
            IPAddr('cluster_cidr', cidr=True),
            IPAddr('service_cidr', cidr=True),
            IPAddr('cluster_dns_ip'),
            IPAddr('node_ip'),
            Str('route_v4_interface', null=True),
            IPAddr('route_v4_gateway', null=True, v6=False),
            Str('route_v6_interface', null=True),
            IPAddr('route_v6_gateway', null=True, v4=False),
            update=True,
        ))
    @job(lock='kubernetes_update')
    async def do_update(self, job, data):
        """
        `pool` must be a valid ZFS pool configured in the system. Kubernetes service will initialise the pool by
        creating datasets under `pool_name/ix-applications`.

        `cluster_cidr` is the CIDR to be used for default NAT network between workloads.

        `service_cidr` is the CIDR to be used for kubernetes services which are an abstraction and refer to a
        logical set of kubernetes pods.

        `cluster_dns_ip` is the IP of the DNS server running for the kubernetes cluster. It must be in the range
        of `service_cidr`.

        Values for `cluster_cidr`, `service_cidr` and `cluster_dns_ip` are permanent, and a subsequent change
        requires re-initialisation of the applications. To clarify, the system will destroy the old `ix-applications`
        dataset and any data within it when any of these values change.

        `node_ip` is the IP address which the kubernetes cluster will assign to the TrueNAS node. It defaults to
        0.0.0.0 and the cluster in this case will automatically manage which IP address to use for managing traffic
        for default NAT network.

        By default kubernetes pods will use the system's default gateway for outward traffic. This might
        not be desirable for users who want to separate NAT traffic over a specific interface / route. The system
        will create an L3 network which will route the NAT traffic towards the default gateway.

        If users want to restrict traffic over a certain gateway / interface, they can specify a default route
        for the NAT traffic. `route_v4_interface` and `route_v4_gateway` will set a default route for the kubernetes
        cluster IPv4 traffic. Similarly `route_v6_interface` and `route_v6_gateway` can be used to specify default
        route for IPv6 traffic.
        """
        old_config = await self.config()
        old_config.pop('dataset')
        config = old_config.copy()
        config.update(data)

        await self.validate_data(config, 'kubernetes_update')

        if len(set(old_config.items()) ^ set(config.items())) > 0:
            config['cni_config'] = {}
            await self.middleware.call('datastore.update',
                                       self._config.datastore,
                                       old_config['id'], config)
            await self.middleware.call('kubernetes.status_change')
            if config['pool'] != old_config['pool']:
                await self.middleware.call('catalog.sync_all')

        return await self.config()

    @accepts()
    async def bindip_choices(self):
        """
        Returns ip choices for Kubernetes service to use.
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use', {
                'static': True,
                'any': True
            })
        }

    @private
    async def validate_k8s_setup(self):
        k8s_config = await self.middleware.call('kubernetes.config')
        if not k8s_config['dataset']:
            raise CallError('Please configure kubernetes pool.')
        if not await self.middleware.call('service.started', 'kubernetes'):
            raise CallError('Kubernetes service is not running.')
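
The `cluster_dns_ip` / `service_cidr` check in `validate_data` above is the standard `ipaddress` membership test; a
standalone sketch with made-up values:

import ipaddress

service_cidr = '172.17.0.0/16'
cluster_dns_ip = '172.17.0.10'

# Mirrors the validation above: the cluster DNS IP must fall inside the service CIDR.
assert ipaddress.ip_address(cluster_dns_ip) in ipaddress.ip_network(service_cidr)
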
Example #9
File: reporting.py  Project: tejp/freenas
class ReportingService(ConfigService):

    class Config:
        datastore = 'system.reporting'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__rrds = {}
        for name, klass in RRD_PLUGINS.items():
            self.__rrds[name] = klass(self.middleware)

    @accepts(
        Dict(
            'reporting_update',
            Bool('cpu_in_percentage'),
            Str('graphite'),
            Int('graph_age', validators=[Range(min=1)]),
            Int('graph_points', validators=[Range(min=1)]),
            Bool('confirm_rrd_destroy'),
            update=True
        )
    )
    async def do_update(self, data):
        """
        Configure Reporting Database settings.

        If `cpu_in_percentage` is `true`, collectd reports CPU usage in percentage instead of "jiffies".

        `graphite` specifies a destination hostname or IP for collectd data sent by the Graphite plugin.

        `graph_age` specifies the maximum age of stored graphs in months. `graph_points` is the number of points for
        each hourly, daily, weekly, etc. graph. Changing these requires destroying the current reporting database,
        so when these fields are changed, an additional `confirm_rrd_destroy: true` flag must be present.

        .. examples(websocket)::

          Update reporting settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "cpu_in_percentage": false,
                    "graphite": "",
                }]
            }

          Recreate reporting database with new settings

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.update",
                "params": [{
                    "graph_age": 12,
                    "graph_points": 1200,
                    "confirm_rrd_destroy": true,
                }]
            }
        """

        confirm_rrd_destroy = data.pop('confirm_rrd_destroy', False)

        old = await self.config()

        new = copy.deepcopy(old)
        new.update(data)

        verrors = ValidationErrors()

        destroy_database = False
        for k in ['graph_age', 'graph_points']:
            if old[k] != new[k]:
                destroy_database = True

                if not confirm_rrd_destroy:
                    verrors.add(
                        f'reporting_update.{k}',
                        _('Changing this option requires destroying the reporting database. This action must be '
                          'confirmed by setting confirm_rrd_destroy flag'),
                    )

        if verrors:
            raise verrors

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            old['id'],
            new,
            {'prefix': self._config.datastore_prefix}
        )

        if destroy_database:
            await self.middleware.call('service.stop', 'collectd')
            await self.middleware.call('service.stop', 'rrdcached')
            await run('sh', '-c', 'rm -rfx /var/db/collectd/rrd/*', check=False)
            await self.middleware.call('reporting.setup')
            await self.middleware.call('service.start', 'rrdcached')

        await self.middleware.call('service.restart', 'collectd')

        return await self.config()

    @private
    def setup(self):
        systemdatasetconfig = self.middleware.call_sync('systemdataset.config')
        if not systemdatasetconfig['path']:
            self.middleware.logger.error(f'System dataset is not mounted')
            return False

        rrd_mount = f'{systemdatasetconfig["path"]}/rrd-{systemdatasetconfig["uuid"]}'
        if not os.path.exists(rrd_mount):
            self.middleware.logger.error(f'{rrd_mount} does not exist or is not a directory')
            return False

        # Ensure that collectd working path is a symlink to system dataset
        pwd = '/var/db/collectd/rrd'
        if os.path.exists(pwd) and (not os.path.isdir(pwd) or not os.path.islink(pwd)):
            shutil.move(pwd, f'{pwd}.{time.strftime("%Y%m%d%H%M%S")}')
        if not os.path.exists(pwd):
            os.symlink(rrd_mount, pwd)

        # Migrate legacy RAMDisk
        persist_file = '/data/rrd_dir.tar.bz2'
        if os.path.isfile(persist_file):
            with tarfile.open(persist_file) as tar:
                if 'collectd/rrd' in tar.getnames():
                    tar.extractall(pwd, get_members(tar, 'collectd/rrd/'))

            os.unlink('/data/rrd_dir.tar.bz2')

        hostname = self.middleware.call_sync('system.info')['hostname']
        if not hostname:
            hostname = self.middleware.call_sync('network.configuration.config')['hostname_local']

        # Migrate from old version, where `hostname` was a real directory and `localhost` was a symlink.
        # Skip the case where `hostname` is "localhost", so symlink was not (and is not) needed.
        if (
            hostname != 'localhost' and
            os.path.isdir(os.path.join(pwd, hostname)) and
            not os.path.islink(os.path.join(pwd, hostname))
        ):
            if os.path.exists(os.path.join(pwd, 'localhost')):
                if os.path.islink(os.path.join(pwd, 'localhost')):
                    os.unlink(os.path.join(pwd, 'localhost'))
                else:
                    # This should not happen, but just in case
                    shutil.move(
                        os.path.join(pwd, 'localhost'),
                        os.path.join(pwd, f'localhost.bak.{time.strftime("%Y%m%d%H%M%S")}')
                    )
            shutil.move(os.path.join(pwd, hostname), os.path.join(pwd, 'localhost'))

        # Remove all directories except "localhost" and its backups (that may be erroneously created by
        # running collectd before this script)
        to_remove_dirs = [
            os.path.join(pwd, d) for d in os.listdir(pwd)
            if not d.startswith('localhost') and os.path.isdir(os.path.join(pwd, d))
        ]
        for r_dir in to_remove_dirs:
            subprocess.run(['rm', '-rfx', r_dir])

        # Remove all symlinks (that are stale if hostname was changed).
        to_remove_symlinks = [
            os.path.join(pwd, l) for l in os.listdir(pwd)
            if os.path.islink(os.path.join(pwd, l))
        ]
        for r_symlink in to_remove_symlinks:
            os.unlink(r_symlink)

        # Create "localhost" directory if it does not exist
        if not os.path.exists(os.path.join(pwd, 'localhost')):
            os.makedirs(os.path.join(pwd, 'localhost'))

        # Create "${hostname}" -> "localhost" symlink if necessary
        if hostname != 'localhost':
            os.symlink(os.path.join(pwd, 'localhost'), os.path.join(pwd, hostname))

        # Let's return a positive value to indicate that necessary collectd operations were performed successfully
        return True

    @filterable
    def graphs(self, filters, options):
        return filter_list([i.__getstate__() for i in self.__rrds.values()], filters, options)

    def __rquery_to_start_end(self, query):
        unit = query.get('unit')
        if unit:
            verrors = ValidationErrors()
            for i in ('start', 'end'):
                if i in query:
                    verrors.add(
                        f'reporting_query.{i}',
                        f'{i!r} should only be used if "unit" attribute is not provided.',
                    )
            verrors.check()
        else:
            if 'start' not in query:
                unit = 'HOURLY'
            else:
                starttime = query['start']
                endtime = query.get('end') or 'now'

        if unit:
            unit = unit[0].lower()
            page = query['page']
            starttime = f'end-{page + 1}{unit}'
            if not page:
                endtime = 'now'
            else:
                endtime = f'now-{page}{unit}'
        return starttime, endtime

    @accepts(
        List('graphs', items=[
            Dict(
                'graph',
                Str('name', required=True),
                Str('identifier', default=None, null=True),
            ),
        ], empty=False),
        Dict(
            'reporting_query',
            Str('unit', enum=['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']),
            Int('page', default=0),
            Str('start', empty=False),
            Str('end', empty=False),
            Bool('aggregate', default=True),
            register=True,
        )
    )
    def get_data(self, graphs, query):
        """
        Get reporting data for given graphs.

        List of possible graphs can be retrieved using `reporting.graphs` call.

        For the time period of the graph either `unit` and `page` OR `start` and `end` should be
        used, not both.

        `aggregate` will return aggregate available data for each graph (e.g. min, max, mean).

        .. examples(websocket)::

          Get graph data of "nfsstat" from the last hour.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "reporting.get_data",
                "params": [
                    [{"name": "nfsstat"}],
                    {"unit": "HOURLY"},
                ]
            }

        """
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for i in graphs:
            try:
                rrd = self.__rrds[i['name']]
            except KeyError:
                raise CallError(f'Graph {i["name"]!r} not found.', errno.ENOENT)
            rv.append(
                rrd.export(i['identifier'], starttime, endtime, aggregate=query['aggregate'])
            )
        return rv

    @private
    @accepts(Ref('reporting_query'))
    def get_all(self, query):
        starttime, endtime = self.__rquery_to_start_end(query)
        rv = []
        for rrd in self.__rrds.values():
            idents = rrd.get_identifiers()
            if idents is None:
                idents = [None]
            for ident in idents:
                rv.append(rrd.export(ident, starttime, endtime, aggregate=query['aggregate']))
        return rv

    @private
    def get_plugin_and_rrd_types(self, name_idents):
        rv = []
        for name, identifier in name_idents:
            rrd = self.__rrds[name]
            rv.append(((name, identifier), rrd.plugin, rrd.get_rrd_types(identifier)))
        return rv
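
A worked example of the `__rquery_to_start_end` mapping above (the unit is lower-cased to its first letter and
combined with the zero-based `page`; values follow the code):

# {'unit': 'HOUR', 'page': 0}    ->  start='end-1h', end='now'
# {'unit': 'DAY',  'page': 2}    ->  start='end-3d', end='now-2d'
# {'start': '-1w', 'end': 'now'} ->  passed through unchanged (no unit given)
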
Example #10
class InterfaceService(Service):

    delegates = []

    class Config:
        namespace_alias = "interfaces"

    @private
    def register_listen_delegate(self, delegate):
        self.delegates.append(delegate)

    @accepts()
    @returns(
        List('services_to_be_restarted',
             items=[
                 Dict(
                     'service_restart',
                     Str('type', required=True),
                     Str('service', required=True),
                     List('ips', required=True, items=[Str('ip')]),
                 )
             ]))
    async def services_restarted_on_sync(self):
        """
        Returns which services will be set to listen on 0.0.0.0 (and, thus, restarted) on sync.

        Example result:
        [
            // Samba service will be set to listen on 0.0.0.0 and restarted because it was set up to listen on
            // 192.168.0.1 which is being removed.
            {"type": "SYSTEM_SERVICE", "service": "cifs", "ips": ["192.168.0.1"]},
        ]
        """
        return [
            dict(await pd.delegate.repr(pd.state), ips=pd.addresses)
            for pd in await self.listen_delegates_prepare()
        ]

    @private
    async def listen_delegates_prepare(self):
        original_datastores = await self.middleware.call(
            "interface.get_original_datastores")
        if not original_datastores:
            return []

        datastores = await self.middleware.call("interface.get_datastores")

        old_addresses = self._collect_addresses(original_datastores)
        addresses = self._collect_addresses(datastores)
        gone_addresses = old_addresses - addresses

        result = []
        for delegate in self.delegates:
            state = await delegate.get_listen_state(gone_addresses)
            delegate_addresses = [
                address for address in gone_addresses
                if await delegate.listens_on(state, address)
            ]
            if delegate_addresses:
                result.append(
                    PreparedDelegate(delegate, state, delegate_addresses))

        return result

    def _collect_addresses(self, datastores):
        addresses = set()
        for iface, alias in zip_longest(datastores["interfaces"],
                                        datastores["alias"],
                                        fillvalue={}):
            addresses.add(iface.get("int_address", ""))
            addresses.add(iface.get("int_address_b", ""))
            addresses.add(iface.get("int_vip", ""))
            addresses.add(alias.get("alias_address", ""))
            addresses.add(alias.get("alias_address_b", ""))
            addresses.add(alias.get("alias_vip", ""))
        addresses.discard("")
        return addresses
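
A worked sketch of `_collect_addresses` above on hypothetical datastore rows (field names follow the code; empty
strings are discarded):

datastores = {
    'interfaces': [{'int_address': '192.168.0.10', 'int_address_b': '', 'int_vip': '192.168.0.12'}],
    'alias': [{'alias_address': '192.168.0.20'}],
}
# _collect_addresses(datastores) -> {'192.168.0.10', '192.168.0.12', '192.168.0.20'}
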
Example #11
class PoolDatasetService(Service):
    class Config:
        namespace = 'pool.dataset'
        event_send = False

    @accepts(Str('dataset'))
    async def unlock_services_restart_choices(self, dataset):
        """
        Get a mapping of service identifiers and labels that can be restarted on dataset unlock.
        """
        await self.middleware.call('pool.dataset.get_instance', dataset)
        services = {
            'afp': 'AFP',
            'cifs': 'SMB',
            'ftp': 'FTP',
            'iscsitarget': 'iSCSI',
            'nfs': 'NFS',
            'webdav': 'WebDAV',
        }

        result = {}
        for k, v in services.items():
            service = await self.middleware.call('service.query',
                                                 [['service', '=', k]],
                                                 {'get': True})
            if service['enable'] or service['state'] == 'RUNNING':
                result[k] = v

        check_services = {'kubernetes': 'Applications', 's3': 'S3', **services}

        result.update({
            k: check_services[k]
            for k in map(
                lambda a: a['service'], await self.middleware.call(
                    'pool.dataset.attachments', dataset))
            if k in check_services
        })

        if osc.IS_FREEBSD:
            try:
                activated_pool = await self.middleware.call(
                    'jail.get_activated_pool')
            except Exception:
                activated_pool = None

            # If iocage is not activated yet, there is a chance that this pool might have it activated there
            if activated_pool is None:
                result['jails'] = 'Jails/Plugins'

        if await self.unlock_restarted_vms(dataset):
            result['vms'] = 'Virtual Machines'

        return result

    @private
    async def unlock_restarted_vms(self, dataset_name):
        result = []
        for vm in await self.middleware.call('vm.query',
                                             [('autostart', '=', True)]):
            for device in vm['devices']:
                if device['dtype'] not in ('DISK', 'RAW'):
                    continue

                path = device['attributes'].get('path')
                if not path:
                    continue

                if path.startswith(
                        f'/dev/zvol/{dataset_name}/') or path.startswith(
                            f'/mnt/{dataset_name}/'):
                    result.append(vm)
                    break

        return result

    @private
    async def restart_vms_after_unlock(self, dataset_name):
        for vm in await self.unlock_restarted_vms(dataset_name):
            if (await self.middleware.call('vm.status',
                                           vm['id']))['state'] == 'RUNNING':
                stop_job = await self.middleware.call('vm.stop', vm['id'])
                await stop_job.wait()
                if stop_job.error:
                    self.logger.error('Failed to stop %r VM: %s', vm['name'],
                                      stop_job.error)
            try:
                await self.middleware.call('vm.start', vm['id'])
            except Exception:
                self.logger.error('Failed to start %r VM after %r unlock',
                                  vm['name'],
                                  dataset_name,
                                  exc_info=True)

    @private
    async def restart_services_after_unlock(self, dataset_name,
                                            services_to_restart):
        try:
            to_restart = [[i]
                          for i in set(services_to_restart) - {'jails', 'vms'}]
            if not to_restart:
                return

            restart_job = await self.middleware.call('core.bulk',
                                                     'service.restart',
                                                     to_restart)
            statuses = await restart_job.wait()
            for idx, srv_status in enumerate(statuses):
                if srv_status['error']:
                    self.logger.error(
                        'Failed to restart %r service after %r unlock: %s',
                        to_restart[idx], dataset_name, srv_status['error'])
            if 'jails' in services_to_restart:
                await self.middleware.call('jail.rc_action', ['RESTART'])
            if 'vms' in services_to_restart:
                await self.middleware.call(
                    'pool.dataset.restart_vms_after_unlock', dataset_name)
        except Exception:
            self.logger.error(
                'Failed to restart %r services after %r unlock',
                ', '.join(services_to_restart),
                dataset_name,
                exc_info=True,
            )
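
For reference, a sketch of the kind of mapping `unlock_services_restart_choices` returns (keys and labels follow the
`services` dict above; the exact contents depend on which services are enabled and which attachments the dataset
has):

# {'cifs': 'SMB', 'nfs': 'NFS', 'kubernetes': 'Applications', 'vms': 'Virtual Machines'}
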
Example #12
class AuthService(Service):

    def __init__(self, *args, **kwargs):
        super(AuthService, self).__init__(*args, **kwargs)
        self.authtokens = AuthTokens()

    @accepts(Str('username'), Str('password'))
    def check_user(self, username, password):
        """
        Verify username and password
        """
        if username != 'root':
            return False
        try:
            user = self.middleware.call('datastore.query', 'account.bsdusers', [('bsdusr_username', '=', username)], {'get': True})
        except IndexError:
            return False
        if user['bsdusr_unixhash'] in ('x', '*'):
            return False
        return crypt.crypt(password, user['bsdusr_unixhash']) == user['bsdusr_unixhash']

    @accepts(Int('ttl', required=False))
    def generate_token(self, ttl=None):
        """Generate a token to be used for authentication."""
        if ttl is None:
            ttl = 600
        return self.authtokens.new(ttl)['id']

    @no_auth_required
    @accepts(Str('username'), Str('password'))
    @pass_app
    def login(self, app, username, password):
        """Authenticate session using username and password.
        Currently only root user is allowed.
        """
        valid = self.check_user(username, password)
        if valid:
            app.authenticated = True
        return valid

    @no_auth_required
    @accepts(Str('token'))
    @pass_app
    def token(self, app, token):
        """Authenticate using a given `token` id."""

        def update_token(app, message):
            """
            On every new message from the registered connection
            make sure the token is still valid, updating last time or
            removing authentication
            """
            token = self.authtokens.get_token_by_sessionid(app.sessionid)
            if token is None:
                return
            if int(time.time()) - token['ttl'] < token['last']:
                token['last'] = int(time.time())
            else:
                self.authtokens.pop_token(token['id'])
                app.authenticated = False

        def remove_session(app):
            """
            On connection close, remove session id from token
            """
            self.authtokens.remove_session(app.sessionid)

        token = self.authtokens.get_token(token)
        if token is None:
            return False

        """
        If token exists and is still valid (TTL) do the following:
          - authenticate the connection
          - add the session id to token
          - register connection callbacks to update/remove token
        """
        if int(time.time()) - token['ttl'] < token['last']:
            token['last'] = int(time.time())
            self.authtokens.add_session(app.sessionid, token)
            app.register_callback('on_message', update_token)
            app.register_callback('on_close', remove_session)
            app.authenticated = True
            return True
        else:
            self.authtokens.pop_token(token['id'])
            return False
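
The password check in `check_user` above is the usual crypt round-trip; a standalone sketch of the same comparison
(hash and password values would come from the caller):

import crypt

def verify(password, unixhash):
    # Re-hash the candidate password using the stored hash as the salt and compare.
    return crypt.crypt(password, unixhash) == unixhash
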
Example #13
class ReplicationService(Service):

    @private
    def ssh_keyscan(self, host, port):
        proc = Popen([
            "/usr/bin/ssh-keyscan",
            "-p", str(port),
            "-T", "2",
            str(host),
        ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        key, errmsg = proc.communicate()
        if proc.returncode != 0 or not key:
            if not errmsg:
                errmsg = 'ssh key scan failed for unknown reason'
            raise ValueError(errmsg)
        return key

    @private
    @accepts(Dict(
        'replication-pair-data',
        Str('hostname', required=True),
        Str('public-key', required=True),
        Str('user'),
    ))
    def pair(self, data):
        """
        Receives a public key, stores it to accept SSH connections, and returns
        pertinent SSH data of this machine.
        """
        service = self.middleware.call('datastore.query', 'services.services', [('srv_service', '=', 'ssh')], {'get': True})
        ssh = self.middleware.call('datastore.query', 'services.ssh', None, {'get': True})
        try:
            user = self.middleware.call('datastore.query', 'account.bsdusers', [('bsdusr_username', '=', data.get('user') or 'root')], {'get': True})
        except IndexError:
            raise ValueError('User "{}" does not exist'.format(data.get('user')))

        # Make sure SSH is enabled
        if not service['srv_enable']:
            self.middleware.call('datastore.update', 'services.services', service['id'], {'srv_enable': True})
            self.middleware.call('notifier.start', 'ssh')

            # This might be the first time of the service being enabled
            # which will then result in new host keys we need to grab
            ssh = self.middleware.call('datastore.query', 'services.ssh', None, {'get': True})

        if not os.path.exists(user['bsdusr_home'].encode('utf8')):
            raise ValueError('Homedir {} does not exist'.format(user['bsdusr_home']))

        # Write public key in user authorized_keys for SSH
        authorized_keys_file = '{}/.ssh/authorized_keys'.format(user['bsdusr_home'])
        with open(authorized_keys_file, 'a+') as f:
            f.seek(0)
            if data['public-key'] not in f.read():
                f.write('\n' + data['public-key'])

        ssh_hostkey = '{0} {1}\n{0} {2}\n{0} {3}\n'.format(
            data['hostname'],
            base64.b64decode(ssh['ssh_host_rsa_key_pub']),
            base64.b64decode(ssh['ssh_host_ecdsa_key_pub']),
            base64.b64decode(ssh['ssh_host_ed25519_key_pub']),
        )

        return {
            'ssh_port': ssh['ssh_tcpport'],
            'ssh_hostkey': ssh_hostkey,
        }
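
The host-key scan above is a plain subprocess call; an equivalent standalone sketch using subprocess.run (one
assumption: ssh-keyscan is resolved from PATH rather than the absolute path used above):

import subprocess

def ssh_keyscan(host, port=22, timeout=2):
    proc = subprocess.run(
        ['ssh-keyscan', '-p', str(port), '-T', str(timeout), str(host)],
        capture_output=True,
    )
    if proc.returncode != 0 or not proc.stdout:
        raise ValueError(proc.stderr or b'ssh key scan failed for unknown reason')
    return proc.stdout
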
Example #14
class ConsulService(Service):

    INFLUXDB_API = [
        'host', 'username', 'password', 'database', 'series-name', 'enabled'
    ]
    SLACK_API = [
        'cluster-name', 'url', 'channel', 'username', 'icon-url', 'detailed',
        'enabled'
    ]
    MATTERMOST_API = [
        'cluster', 'url', 'username', 'password', 'team', 'channel', 'enabled'
    ]
    PAGERDUTY_API = ['service-key', 'client-name', 'enabled']
    HIPCHAT_API = [
        'from', 'cluster-name', 'base-url', 'room-id', 'auth-token', 'enabled'
    ]
    OPSGENIE_API = ['cluster-name', 'api-key', 'enabled']
    AWSSNS_API = ['region', 'topic-arn', 'enabled']
    VICTOROPS_API = ['api-key', 'routing-key', 'enabled']

    @accepts(Str('key'), Any('value'))
    def set_kv(self, key, value):
        """
        Sets `key` with `value` in Consul KV.

        Returns:
                    bool: True if the value was added successfully, otherwise False.
        """
        c = consul.Consul()
        return c.kv.put(str(key), str(value))

    @accepts(Str('key'))
    def get_kv(self, key):
        """
        Gets value of `key` in Consul KV.

        Returns:
                    str: The value, or an empty string if the key does not exist.
        """
        c = consul.Consul()
        index = None
        index, data = c.kv.get(key, index=index)
        if data is not None:
            return data['Value'].decode("utf-8")
        else:
            return ""

    @accepts(Str('key'))
    def delete_kv(self, key):
        """
        Delete a `key` in Consul KV.

        Returns:
                    bool: True if the data was deleted, otherwise False.
        """
        c = consul.Consul()
        return c.kv.delete(str(key))

    def _convert_keys(self, data):
        """
        Transforms keys that contain "_" into keys with "-".

        Returns:
                    dict: The same dict with "-" substituted for "_" in its keys.
        """
        for key in list(data.keys()):
            new_key = key.replace("_", "-")
            if new_key != key:
                data[new_key] = data[key]
                del data[key]

        return data

    def _api_keywords(self, api_list, data):
        """
        Helper to convert the API list into a dict.

        Returns:
                    dict: Keys from the API list mapped to their values in `data` (missing keys map to None).
        """
        new_dict = {k: data.get(k, None) for k in api_list}

        return new_dict

    def _insert_keys(self, prefix, data, api_keywords):
        """
        Helper to insert keys into consul.

        Note: because 'from' is a reserved word in Python, we can't
        use it directly and instead we use hfrom and convert it later.
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k, v in list(new_dict.items()):
            if k == 'hfrom':
                k = 'from'
            self.set_kv(prefix + k, v)

    def _delete_keys(self, prefix, data, api_keywords):
        """
        Helper to delete keys from consul.

        Note: The same applies for 'from' like explained on _insert_keys().
        """
        new_dict = self._api_keywords(api_keywords, data)

        for k in list(new_dict.keys()):
            if k == 'hfrom':
                k = 'from'
            self.delete_kv(prefix + k)

    def do_create(self, data):
        """
        Helper to insert keys into consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/'
        cdata = self._convert_keys(data)

        alert_service = data.pop('consulalert-type')
        consul_prefix = consul_prefix + alert_service.lower() + '/'

        if alert_service == 'InfluxDB':
            self._insert_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            self._insert_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            self._insert_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            self._insert_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            self._insert_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            self._insert_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            self._insert_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            self._insert_keys(consul_prefix, cdata, self.VICTOROPS_API)

    def do_delete(self, alert_service, data):
        """
        Helper to delete the keys from consul based on the service API.
        """
        consul_prefix = 'consul-alerts/config/notifiers/' + alert_service.lower() + '/'
        cdata = self._convert_keys(data)

        if alert_service == 'InfluxDB':
            self._delete_keys(consul_prefix, cdata, self.INFLUXDB_API)
        elif alert_service == 'Slack':
            self._delete_keys(consul_prefix, cdata, self.SLACK_API)
        elif alert_service == 'Mattermost':
            self._delete_keys(consul_prefix, cdata, self.MATTERMOST_API)
        elif alert_service == 'PagerDuty':
            self._delete_keys(consul_prefix, cdata, self.PAGERDUTY_API)
        elif alert_service == 'HipChat':
            self._delete_keys(consul_prefix, cdata, self.HIPCHAT_API)
        elif alert_service == 'OpsGenie':
            self._delete_keys(consul_prefix, cdata, self.OPSGENIE_API)
        elif alert_service == 'AWS-SNS':
            self._delete_keys(consul_prefix, cdata, self.AWSSNS_API)
        elif alert_service == 'VictorOps':
            self._delete_keys(consul_prefix, cdata, self.VICTOROPS_API)
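
The KV helpers above call the python-consul client directly; a minimal standalone round-trip sketch (assumes a local
consul agent; the key is illustrative):

import consul

c = consul.Consul()
c.kv.put('consul-alerts/config/notifiers/slack/enabled', 'True')
index, data = c.kv.get('consul-alerts/config/notifiers/slack/enabled')
print(data['Value'].decode('utf-8') if data else '')
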
Example #15
class DiskService(Service):

    @private
    async def destroy_partitions(self, disk):
        if osc.IS_LINUX:
            await run(['sgdisk', '-Z', os.path.join('/dev', disk)])
        else:
            await run('gpart', 'destroy', '-F', f'/dev/{disk}', check=False)
            # Wipe out the partition table by doing an additional iterate of create/destroy
            await run('gpart', 'create', '-s', 'gpt', f'/dev/{disk}')
            await run('gpart', 'destroy', '-F', f'/dev/{disk}')

    @private
    async def wipe_quick(self, dev, size=None):
        # If the size is too small, let's just skip it for now.
        # In the future we can adjust dd size
        if size and size < 33554432:
            return
        await run('dd', 'if=/dev/zero', f'of=/dev/{dev}', 'bs=1M', 'count=32')
        size = await self.middleware.call('disk.get_dev_size', dev)
        if not size:
            self.logger.error(f'Unable to determine size of {dev}')
        else:
            # This will fail when EOL is reached
            await run(
                'dd', 'if=/dev/zero', f'of=/dev/{dev}', 'bs=1M', f'oseek={int(size / (1024*1024)) - 32}', check=False
            )

    @accepts(
        Str('dev'),
        Str('mode', enum=['QUICK', 'FULL', 'FULL_RANDOM'], required=True),
        Bool('synccache', default=True),
        Ref('swap_removal_options'),
    )
    @job(lock=lambda args: args[0])
    async def wipe(self, job, dev, mode, sync, options=None):
        """
        Performs a wipe of a disk `dev`.
        It can be of the following modes:
          - QUICK: clean the first few and last megabytes of every partition and disk
          - FULL: write the whole disk with zeros
          - FULL_RANDOM: write the whole disk with random bytes
        """
        await self.middleware.call('disk.swaps_remove_disks', [dev], options)

        if osc.IS_FREEBSD:
            await self.middleware.call('disk.remove_disk_from_graid', dev)

        # First do a quick wipe of every partition to clean things like zfs labels
        if mode == 'QUICK':
            for part in await self.middleware.call('disk.list_partitions', dev):
                await self.wipe_quick(part['name'], part['size'])

        await self.middleware.call('disk.destroy_partitions', dev)

        if mode == 'QUICK':
            await self.wipe_quick(dev)
        else:
            size = await self.middleware.call('disk.get_dev_size', dev) or 1

            proc = await Popen([
                'dd',
                'if=/dev/{}'.format('zero' if mode == 'FULL' else 'random'),
                f'of=/dev/{dev}',
                'bs=1M',
            ], stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)

            async def dd_wait():
                while True:
                    if proc.returncode is not None:
                        break
                    os.kill(proc.pid, signal.SIGUSR1 if osc.IS_LINUX else signal.SIGINFO)
                    await asyncio.sleep(1)

            asyncio.ensure_future(dd_wait())

            while True:
                line = await proc.stderr.readline()
                if line == b'':
                    break
                line = line.decode()
                reg = RE_DD.search(line)
                if reg:
                    speed = float(reg.group(2)) if osc.IS_LINUX else int(reg.group(2))
                    if osc.IS_LINUX:
                        mapping = {'gb': 1024 * 1024 * 1024, 'mb': 1024 * 1024, 'kb': 1024, 'b': 1}
                        speed = int(speed * mapping[reg.group(3).lower()])
                    job.set_progress((int(reg.group(1)) / size) * 100, extra={'speed': speed})

        if sync:
            await self.middleware.call('disk.sync', dev)
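
A worked example of the tail-wipe arithmetic in `wipe_quick` above (the size is made up; `disk.get_dev_size` is
assumed to return bytes):

# size  = 16 * 1024**3                     # 16 GiB disk
# oseek = int(size / (1024 * 1024)) - 32   # = 16352 one-MiB blocks
# i.e. the second dd starts 32 MiB before the end of the device and zeroes until EOF.
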
Example #16
File: idmap.py  Project: mike0615/freenas
class IdmapService(Service):
    class Config:
        private = False
        namespace = 'idmap'

    @accepts(Str('domain'))
    async def get_or_create_idmap_by_domain(self, domain):
        """
        Returns idmap settings based on the pre-Windows 2000 domain name (workgroup).
        If mapping exists, but there's no corresponding entry in the specified idmap
        table, then we generate a new one with the next available block of ids and return it.
        """
        my_domain = await self.middleware.call('idmap.domaintobackend.query',
                                               [('domain', '=', domain)])
        if not my_domain:
            raise CallError(
                f'No domain to idmap backend exists for domain [{domain}]',
                errno.ENOENT)

        backend_entry = await self.middleware.call(
            f'idmap.{my_domain[0]["idmap_backend"].lower()}.query',
            [('domain.idmap_domain_name', '=', domain)])
        if backend_entry:
            return backend_entry[0]

        next_idmap_range = await self.get_next_idmap_range()
        new_idmap = await self.middleware.call(
            f'idmap.{my_domain[0]["idmap_backend"].lower()}.create', {
                'domain': {
                    'id': my_domain[0]['id']
                },
                'range_low': next_idmap_range[0],
                'range_high': next_idmap_range[1]
            })
        return new_idmap

    @private
    async def idmap_domain_choices(self):
        choices = []
        domains = await self.middleware.call('idmap.domain.query')
        for domain in domains:
            choices.append(domain['name'])

        return choices

    @private
    async def get_idmap_legacy(self, obj_type, idmap_type):
        """
        This is compatibility shim for legacy idmap code
        utils.get_idmap()
        obj_type is dstype.
        idmap_type is the idmap backend
        If we don't have a corresponding entry in the idmap backend table,
        automatically generate one.
        """
        idmap_type = idmap_type.lower()
        if idmap_type in ['adex', 'hash']:
            raise CallError(f'idmap backend {idmap_type} has been deprecated')

        ds_type = dstype(int(obj_type)).name

        if ds_type not in [
                'DS_TYPE_ACTIVEDIRECTORY', 'DS_TYPE_LDAP',
                'DS_TYPE_DEFAULT_DOMAIN'
        ]:
            raise CallError(f'idmap backends are not supported for {ds_type}')

        res = await self.middleware.call(
            f'idmap.{idmap_type}.query',
            [('domain.idmap_domain_name', '=', ds_type)])
        if res:
            return {
                'idmap_id': res[0]['id'],
                'idmap_type': idmap_type,
                'idmap_name': idmap_type
            }
        next_idmap_range = await self.get_next_idmap_range()
        new_idmap = await self.middleware.call(
            f'idmap.{idmap_type}.create', {
                'domain': {
                    'id': obj_type
                },
                'range_low': next_idmap_range[0],
                'range_high': next_idmap_range[1]
            })
        return {
            'idmap_id': new_idmap['id'],
            'idmap_type': idmap_type,
            'idmap_name': idmap_type
        }

    @private
    async def common_backend_extend(self, data):
        for key in ['ldap_server', 'schema_mode', 'ssl']:
            if key in data and data[key] is not None:
                data[key] = data[key].upper()

        return data

    @private
    async def common_backend_compress(self, data):
        for key in ['ldap_server', 'schema_mode', 'ssl']:
            if key in data and data[key] is not None:
                data[key] = data[key].lower()

        if 'id' in data['domain'] and data['domain']['id']:
            data['domain'] = data['domain']['id']
        elif 'idmap_domain_name' in data['domain'] and data['domain'][
                'idmap_domain_name']:
            domain_info = await self.middleware.call(
                'idmap.domain.query',
                [('domain', '=', data['domain']['idmap_domain_name'])])
            data['domain'] = domain_info[0]['id']
        else:
            domain_info = await self.middleware.call(
                'idmap.domain.query',
                [('domain', '=',
                  data['domain']['idmap_domain_dns_domain_name'].upper())])
            data['domain'] = domain_info[0]['id']

        return data

    @private
    async def _common_validate(self, data):
        """
        Common validation checks for all idmap backends.
        """
        verrors = ValidationErrors()
        if data['range_high'] < data['range_low']:
            verrors.add(
                f'idmap_range',
                'Idmap high range must be greater than idmap low range')
            return verrors

        configured_domains = await self.get_configured_idmap_domains()
        new_range = range(data['range_low'], data['range_high'])
        for i in configured_domains:
            if i['domain']['id'] == data['domain']:
                continue
            existing_range = range(i['backend_data']['range_low'],
                                   i['backend_data']['range_high'])
            if range(max(existing_range[0], new_range[0]),
                     min(existing_range[-1], new_range[-1]) + 1):
                verrors.add(
                    f'idmap_range',
                    f'new idmap range conflicts with existing range for domain [{i["domain"]["idmap_domain_name"]}]'
                )

        return verrors

    @accepts()
    async def get_configured_idmap_domains(self):
        """
        Returns a list of all configured idmap domains. A configured domain is one
        that exists in the domaintobackend table and has a corresponding backend configured in the respective
        idmap_{backend} table. The list is sorted in ascending order by id range.
        """
        domains = await self.middleware.call('idmap.domaintobackend.query')
        configured_domains = []
        for domain in domains:
            b = await self.middleware.call(
                f'idmap.{domain["idmap_backend"].lower()}.query',
                [('domain.idmap_domain_name', '=',
                  domain['domain']['idmap_domain_name'])])
            for entry in b:
                entry.pop('domain')
                entry.pop('id')
                domain.update({'backend_data': entry})
                configured_domains.append(domain)

        return sorted(configured_domains,
                      key=lambda domain: domain['backend_data']['range_high'])

    @private
    async def get_next_idmap_range(self):
        """
        Increment next high range by 100,000,000 ids. This number has
        to accommodate the highest available rid value for a domain.
        Configured idmap ranges _must_ not overlap.
        """
        sorted_idmaps = await self.get_configured_idmap_domains()
        low_range = sorted_idmaps[-1]['backend_data']['range_high'] + 1
        high_range = sorted_idmaps[-1]['backend_data']['range_high'] + 100000000
        return (low_range, high_range)

    @accepts()
    async def clear_idmap_cache(self):
        """
        Stop samba, remove the winbindd_cache.tdb file, start samba, flush samba's cache.
        This should be performed after finalizing idmap changes.
        """
        await self.middleware.call('service.stop', 'smb')
        try:
            os.remove('/var/db/system/samba4/winbindd_cache.tdb')
        except Exception as e:
            self.logger.debug("Failed to remove winbindd_cache.tdb: %s" % e)

        await self.middleware.call('service.start', 'cifs')
        gencache_flush = await run(['net', 'cache', 'flush'], check=False)
        if gencache_flush.returncode != 0:
            raise CallError(
                f'Attempt to flush gencache failed with error: {gencache_flush.stderr.decode().strip()}'
            )

    @private
    async def autodiscover_trusted_domains(self):
        smb = await self.middleware.call('smb.config')
        wbinfo = await run(['/usr/local/bin/wbinfo', '-m', '--verbose'],
                           check=False)
        if wbinfo.returncode != 0:
            raise CallError(
                f'wbinfo -m failed with error: {wbinfo.stderr.decode().strip()}'
            )

        for entry in wbinfo.stdout.decode().splitlines():
            c = entry.split()
            if len(c) == 6 and c[0] != smb['workgroup']:
                await self.middleware.call('idmap.domain.create', {
                    'name': c[0],
                    'dns_domain_name': c[1]
                })
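
A worked example of `get_next_idmap_range` above (made-up numbers):

# highest configured range_high = 189999999
# low_range  = 189999999 + 1          -> 190000000
# high_range = 189999999 + 100000000  -> 289999999
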
Example #17
class RsyncModService(SharingService):

    share_task_type = 'Rsync Module'

    class Config:
        datastore = 'services.rsyncmod'
        datastore_prefix = 'rsyncmod_'
        datastore_extend = 'rsyncmod.rsync_mod_extend'

    @private
    async def rsync_mod_extend(self, data):
        data['hostsallow'] = data['hostsallow'].split()
        data['hostsdeny'] = data['hostsdeny'].split()
        data['mode'] = data['mode'].upper()
        return data

    @private
    async def common_validation(self, data, schema_name):
        verrors = ValidationErrors()

        await self.validate_path_field(data, schema_name, verrors)

        for entity in ('user', 'group'):
            value = data.get(entity)
            try:
                await self.middleware.call(f'{entity}.get_{entity}_obj',
                                           {f'{entity}name': value})
            except Exception:
                verrors.add(f'{schema_name}.{entity}',
                            f'Please specify a valid {entity}')

        verrors.check()

        data['hostsallow'] = ' '.join(data['hostsallow'])
        data['hostsdeny'] = ' '.join(data['hostsdeny'])
        data['mode'] = data['mode'].lower()

        return data

    @accepts(
        Dict(
            'rsyncmod_create',
            Bool('enabled', default=True),
            Str('name', validators=[Match(r'[^/\]]')]),
            Str('comment'),
            Str('path', required=True, max_length=RSYNC_PATH_LIMIT),
            Str('mode', enum=['RO', 'RW', 'WO']),
            Int('maxconn'),
            Str('user', default='nobody'),
            Str('group', default='nobody'),
            List('hostsallow', items=[Str('hostsallow')], default=[]),
            List('hostsdeny', items=[Str('hostdeny')], default=[]),
            Str('auxiliary', max_length=None),
            register=True,
        ))
    async def do_create(self, data):
        """
        Create a Rsyncmod module.

        `path` represents the path to a dataset. Path length is limited to 1023 characters maximum as per the limit
        enforced by FreeBSD. It is possible that we reach this max length recursively while transferring data. In that
        case, the user must ensure the maximum path will not be too long or modify the recursed path to be shorter
        than the limit.

        `maxconn` is an integer value representing the maximum number of simultaneous connections. Zero represents
        unlimited.

        `hostsallow` is a list of patterns to match hostname/ip address of a connecting client. If list is empty,
        all hosts are allowed.

        `hostsdeny` is a list of patterns to match hostname/ip address of a connecting client. If the pattern is
        matched, access is denied to the client. If no client should be denied, this should be left empty.

        `auxiliary` attribute can be used to pass on any additional parameters from rsyncd.conf(5).
        """

        data = await self.common_validation(data, 'rsyncmod_create')

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self._get_instance(data['id'])

    @accepts(Int('id'),
             Patch('rsyncmod_create', 'rsyncmod_update', ('attr', {
                 'update': True
             })))
    async def do_update(self, id, data):
        """
        Update Rsyncmod module of `id`.
        """
        module = await self.get_instance(id)
        module.update(data)

        module = await self.common_validation(module, 'rsyncmod_update')
        module.pop(self.locked_field)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, module,
                                   {'prefix': self._config.datastore_prefix})

        await self._service_change('rsync', 'reload')

        return await self.get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete Rsyncmod module of `id`.
        """
        return await self.middleware.call('datastore.delete',
                                          self._config.datastore, id)
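
A minimal usage sketch for the service above, assuming a running middleware instance and the `middlewared.client` websocket client; all field values are illustrative placeholders:

from middlewared.client import Client

with Client() as c:
    # Create a read-only rsync module; 0 connections means unlimited.
    module = c.call('rsyncmod.create', {
        'name': 'backups',                  # placeholder module name
        'path': '/mnt/tank/backups',        # placeholder dataset path
        'mode': 'RO',
        'maxconn': 0,
        'hostsallow': ['192.168.0.0/24'],   # empty list would allow all hosts
        'hostsdeny': [],
    })
    # Later, flip the module to read/write and add a comment.
    c.call('rsyncmod.update', module['id'], {'mode': 'RW', 'comment': 'nightly backups'})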
Example #18
File: idmap.py Project: mike0615/freenas
class IdmapDomainService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_domain'
        datastore_prefix = 'idmap_domain_'
        namespace = 'idmap.domain'

    @accepts(
        Dict('idmap_domain_create',
             Str('name', required=True),
             Str('DNS_domain_name'),
             register=True))
    async def do_create(self, data):
        """
        Create a new IDMAP domain. These domains must be unique. This table
        will be automatically populated after joining an Active Directory domain
        if "allow trusted domains" is set to True in the AD service configuration.
        There are three default system domains: DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, DS_TYPE_DEFAULT_DOMAIN.
        The system domains correspond with the idmap settings under Active Directory, LDAP, and SMB
        respectively.
        `name` the pre-Windows 2000 domain name.
        `DNS_domain_name` DNS name of the domain.
        """
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_domain_create", "idmap_domain_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update a domain by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child('idmap_domain_update', await self._validate(new))

        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete a domain by id. Deletion of default system domains is not permitted.
        """
        if id <= 5:
            entry = await self._get_instance(id)
            raise CallError(
                f'Deleting system idmap domain [{entry["name"]}] is not permitted.',
                errno.EPERM)
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)

    @private
    async def _validate(self, data):
        verrors = ValidationErrors()
        if data['id'] <= dstype['DS_TYPE_DEFAULT_DOMAIN'].value:
            verrors.add(
                'id',
                f'Modifying system idmap domain [{data["name"]}] is not permitted.'
            )
        return verrors
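
A short sketch of driving this service through the middleware client (assuming a running middleware; the domain names below are made up):

from middlewared.client import Client

with Client() as c:
    # Create a trusted-domain entry; system domains (low ids) cannot be modified or deleted.
    domain = c.call('idmap.domain.create', {
        'name': 'EXAMPLE',                        # placeholder pre-Windows 2000 name
        'DNS_domain_name': 'example.internal',    # placeholder DNS name
    })
    c.call('idmap.domain.update', domain['id'], {'DNS_domain_name': 'corp.example.internal'})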
Example #19
# without the express permission of iXsystems.

from datetime import datetime, timedelta
import errno
import json
import os

from middlewared.schema import accepts, Bool, Dict, Str
from middlewared.service import cli_private, job, private, Service
import middlewared.sqlalchemy as sa

EULA_FILE = '/usr/local/share/truenas/eula.html'
EULA_PENDING_PATH = "/data/truenas-eula-pending"

user_attrs = [
    Str('first_name'),
    Str('last_name'),
    Str('title'),
    Str('office_phone'),
    Str('mobile_phone'),
    Str('primary_email'),
    Str('secondary_email'),
    Str('address'),
    Str('city'),
    Str('state'),
    Str('zip'),
    Str('country'),
]


class TruenasCustomerInformationModel(sa.Model):
Example #20
File: idmap.py Project: mike0615/freenas
class IdmapDomainBackendService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_domaintobackend'
        datastore_prefix = 'idmap_dtb_'
        namespace = 'idmap.domaintobackend'

    @accepts(
        Dict('idmap_domaintobackend_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Str('idmap_backend',
                 enum=[
                     'AD', 'AUTORID', 'FRUIT', 'LDAP', 'NSS', 'RFC2307', 'RID',
                     'SCRIPT', 'TDB'
                 ]),
             register=True))
    async def do_create(self, data):
        """
        Set an idmap backend for a domain.
        `domain` dictionary containing domain information. Has one-to-one relationship with idmap_domain entries.
        `idmap_backend` type of idmap backend to use for the domain.

        Create entry for domain in the respective idmap backend table if one does not exist.
        """
        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        verrors = ValidationErrors()
        if data['domain'] in [
                dstype.DS_TYPE_LDAP.value, dstype.DS_TYPE_DEFAULT_DOMAIN.value
        ]:
            if data['idmap_backend'] not in ['ldap', 'tdb']:
                verrors.add(
                    'domaintobackend_create.idmap_backend',
                    f'idmap backend [{data["idmap_backend"]}] is not appropriate for the system domain type {dstype[data["domain"]]}'
                )
        if verrors:
            raise verrors

        backend_entry_is_present = False
        idmap_data = await self.middleware.call(
            f'idmap.{data["idmap_backend"]}.query')
        for i in idmap_data:
            if not i['domain']:
                continue
            if i['domain']['idmap_domain_name'] == data['domain'][
                    'idmap_domain_name']:
                backend_entry_is_present = True
                break

        if not backend_entry_is_present:
            next_idmap_range = await self.middleware.call(
                'idmap.get_next_idmap_range')
            await self.middleware.call(
                f'idmap.{data["idmap_backend"]}.create', {
                    'domain': {
                        'id': data['domain']['id']
                    },
                    'range_low': next_idmap_range[0],
                    'range_high': next_idmap_range[1]
                })
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_domaintobackend_create",
                   "idmap_domaintobackend_update", ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update idmap to backend mapping by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        new = await self.middleware.call('idmap.common_backend_compress', new)
        verrors = ValidationErrors()
        if new['domain'] in [
                dstype.DS_TYPE_LDAP.value, dstype.DS_TYPE_DEFAULT_DOMAIN.value
        ]:
            if new['idmap_backend'] not in ['ldap', 'tdb']:
                verrors.add(
                    'domaintobackend_create.idmap_backend',
                    f'idmap backend [{new["idmap_backend"]}] is not appropriate for the system domain type {dstype[new["domain"]]}'
                )
        if verrors:
            raise verrors

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        updated_entry = await self._get_instance(id)
        try:
            await self.middleware.call('idmap.get_or_create_idmap_by_domain',
                                       updated_entry['domain']['domain_name'])
        except Exception as e:
            self.logger.debug('Failed to generate new idmap backend: %s', e)

        return updated_entry

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete idmap to backend mapping by id
        """
        entry = await self._get_instance(id)
        if entry['domain']['id'] <= dstype['DS_TYPE_DEFAULT_DOMAIN'].value:
            raise CallError(
                f'Deleting mapping for [{entry["domain"]["idmap_domain_name"]}] is not permitted.',
                errno.EPERM)
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
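
A hedged usage sketch (middleware client assumed; the domain name is a placeholder): mapping a previously created idmap domain to the RID backend might look roughly like this, with the service creating a matching `idmap.rid` range entry automatically if one does not exist yet:

from middlewared.client import Client

with Client() as c:
    c.call('idmap.domaintobackend.create', {
        'domain': {'idmap_domain_name': 'EXAMPLE'},  # placeholder domain name
        'idmap_backend': 'RID',
    })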
Example #21
class DockerImagesService(CRUDService):
    class Config:
        namespace = 'docker.images'

    @filterable
    async def query(self, filters=None, options=None):
        results = []
        if not await self.middleware.call('service.started', 'docker'):
            return results

        async with aiodocker.Docker() as docker:
            for image in await docker.images.list():
                repo_tags = image['RepoTags'] or []
                results.append({
                    'id': image['Id'],
                    'labels': image['Labels'],
                    'repo_tags': repo_tags,
                    'size': image['Size'],
                    'created': datetime.fromtimestamp(int(image['Created'])),
                    'dangling': len(repo_tags) == 1 and repo_tags[0] == '<none>:<none>',
                })
        return filter_list(results, filters, options)

    @accepts(
        Dict(
            'image_pull',
            Dict(
                'docker_authentication',
                Str('username', required=True),
                Str('password', required=True),
                default=None,
                null=True,
            ),
            Str('from_image', required=True),
            Str('tag', default=None, null=True),
        ))
    async def pull(self, data):
        """
        `from_image` is the name of the image to pull. Format for the name is "registry/repo/image", where
        registry may be omitted, in which case it defaults to the Docker registry.

        `tag` specifies the tag of the image and defaults to `null`. In case of `null`, it will retrieve all the tags
        of the image.

        `docker_authentication` should be specified if image to be retrieved is under a private repository.
        """
        await self.docker_checks()
        async with aiodocker.Docker() as docker:
            try:
                response = await docker.images.pull(
                    from_image=data['from_image'],
                    tag=data['tag'],
                    auth=data['docker_authentication'])
            except aiodocker.DockerError as e:
                raise CallError(f'Failed to pull image: {e.message}')
        return response

    @accepts(Str('id'), Dict(
        'options',
        Bool('force', default=False),
    ))
    async def do_delete(self, id, options):
        """
        `options.force` should be used to force delete an image even if it's in use by a stopped container.
        """
        await self.docker_checks()
        async with aiodocker.Docker() as docker:
            await docker.images.delete(name=id, force=options['force'])

    @private
    async def load_images_from_file(self, path):
        await self.docker_checks()
        if not os.path.exists(path):
            raise CallError(f'"{path}" path does not exist.',
                            errno=errno.ENOENT)

        resp = []
        async with aiodocker.Docker() as client:
            with open(path, 'rb') as f:
                async for i in client.images.import_image(data=f, stream=True):
                    if 'error' in i:
                        raise CallError(
                            f'Unable to load images from file: {i["error"]}')
                    else:
                        resp.append(i)
        return resp

    @private
    async def load_default_images(self):
        await self.load_images_from_file(DEFAULT_DOCKER_IMAGES_PATH)

    @private
    async def docker_checks(self):
        if not await self.middleware.call('service.started', 'docker'):
            raise CallError('Docker service is not running')
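
A short sketch of pulling and then cleaning up images through the middleware client (assuming the docker service is running; the image name is illustrative):

from middlewared.client import Client

with Client() as c:
    # Pull a specific tag; registry is omitted so the default Docker registry is used.
    c.call('docker.images.pull', {'from_image': 'library/nginx', 'tag': '1.21'})

    # Force-delete any dangling images reported by the query above.
    for image in c.call('docker.images.query', [['dangling', '=', True]]):
        c.call('docker.images.delete', image['id'], {'force': True})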
Example #22
File: idmap.py Project: mike0615/freenas
class IdmapADService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_ad'
        datastore_prefix = 'idmap_ad_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.ad'

    @accepts(
        Dict('idmap_ad_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('schema_mode',
                 default='RFC2307',
                 enum=['RFC2307', 'SFU', 'SFU20']),
             Bool('unix_primary_group', default=False),
             Bool('unix_nss_info', default=False),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap backend table.
        `unix_primary_group` If True, the primary group membership is fetched from the LDAP attributes (gidNumber).
        If False, the primary group membership is calculated via the "primaryGroupID" LDAP attribute.

        `unix_nss_info` if True winbind will retrieve the login shell and home directory from the LDAP attributes.
        If False, or if the AD LDAP entry lacks the SFU attributes, the smb4.conf parameters `template shell` and `template homedir` are used.

        `schema_mode` Defines the schema that idmap_ad should use when querying Active Directory regarding user and group information.
        This can be either the RFC2307 schema support included in Windows 2003 R2 or the Service for Unix (SFU) schema.
        For SFU 3.0 or 3.5 please choose "SFU", for SFU 2.0 please choose "SFU20". The behavior of primary group membership is
        controlled by the unix_primary_group option.
        """
        verrors = ValidationErrors()
        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        verrors.add_child(
            'idmap_ad_create', await
            self.middleware.call('idmap._common_validate', data))
        if verrors:
            raise verrors

        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_ad_create", "idmap_ad_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_ad_update', await
            self.middleware.call('idmap._common_validate', new))
        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete idmap to backend mapping by id
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
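
A sketch of creating an AD idmap backend entry via the middleware client (the domain name and range are placeholders; the range must not overlap other configured idmap ranges):

from middlewared.client import Client

with Client() as c:
    c.call('idmap.ad.create', {
        'domain': {'idmap_domain_name': 'EXAMPLE'},  # placeholder domain
        'range_low': 10000,
        'range_high': 90000000,
        'schema_mode': 'RFC2307',
        'unix_primary_group': False,
        'unix_nss_info': False,
    })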
Example #23
class SMBService(SystemServiceService):
    class Config:
        service = 'cifs'
        service_verb = 'restart'
        datastore = 'services.cifs'
        datastore_extend = 'smb.smb_extend'
        datastore_prefix = 'cifs_srv_'

    @private
    async def smb_extend(self, smb):
        """Extend smb for netbios."""
        if (not await self.middleware.call('notifier.is_freenas')
                and await self.middleware.call('notifier.failover_node') == 'B'):
            smb['netbiosname'] = smb['netbiosname_b']

        for i in ('aio_enable', 'aio_rs', 'aio_ws'):
            smb.pop(i, None)

        smb['loglevel'] = LOGLEVEL_MAP.get(smb['loglevel'])

        return smb

    async def __validate_netbios_name(self, name):
        return RE_NETBIOSNAME.match(name)

    async def unixcharset_choices(self):
        return await self.generate_choices([
            'UTF-8', 'ISO-8859-1', 'ISO-8859-15', 'GB2312', 'EUC-JP', 'ASCII'
        ])

    @private
    async def generate_choices(self, initial):
        def key_cp(encoding):
            cp = re.compile(
                r"(?P<name>CP|GB|ISO-8859-|UTF-)(?P<num>\d+)").match(encoding)
            if cp:
                return tuple((cp.group('name'), int(cp.group('num'), 10)))
            else:
                return tuple((encoding, float('inf')))

        charset = await self.common_charset_choices()
        return {
            v: v
            for v in
            [c for c in sorted(charset, key=key_cp) if c not in initial] +
            initial
        }

    @private
    async def common_charset_choices(self):
        def check_codec(encoding):
            try:
                return encoding.upper() if codecs.lookup(encoding) else False
            except LookupError:
                return False

        proc = await Popen(['/usr/bin/iconv', '-l'],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
        output = (await proc.communicate())[0].decode()

        encodings = set()
        for line in output.splitlines():
            enc = [e for e in line.split() if check_codec(e)]

            if enc:
                cp = enc[0]
                for e in enc:
                    if e in ('UTF-8', 'ASCII', 'GB2312', 'HZ-GB-2312',
                             'CP1361'):
                        cp = e
                        break

                encodings.add(cp)

        return encodings

    @accepts(
        Dict(
            'smb_update',
            Str('netbiosname'),
            Str('netbiosname_b'),
            Str('netbiosalias'),
            Str('workgroup'),
            Str('description'),
            Str('unixcharset'),
            Str('loglevel',
                enum=['NONE', 'MINIMUM', 'NORMAL', 'FULL', 'DEBUG']),
            Bool('syslog'),
            Bool('localmaster'),
            Bool('domain_logons'),
            Bool('timeserver'),
            Str('guest'),
            Str('filemask'),
            Str('dirmask'),
            Bool('nullpw'),
            Bool('unixext'),
            Bool('zeroconf'),
            Bool('hostlookup'),
            Bool('allow_execute_always'),
            Bool('obey_pam_restrictions'),
            Bool('ntlmv1_auth'),
            List('bindip', items=[IPAddr('ip')], default=[]),
            Str('smb_options'),
            update=True,
        ))
    async def do_update(self, data):
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if data.get('unixcharset') and data[
                'unixcharset'] not in await self.unixcharset_choices():
            verrors.add('smb_update.unixcharset',
                        'Please provide a valid value for unixcharset')

        for i in ('workgroup', 'netbiosname', 'netbiosname_b', 'netbiosalias'):
            if i not in data or not data[i]:
                continue
            if not await self.__validate_netbios_name(data[i]):
                verrors.add(f'smb_update.{i}', 'Invalid NetBIOS name')

        if new['netbiosname'] and new['netbiosname'].lower() == new['workgroup'].lower():
            verrors.add('smb_update.netbiosname',
                        'NetBIOS and Workgroup must be unique')

        for i in ('filemask', 'dirmask'):
            if i not in data or not data[i]:
                continue
            try:
                if int(data[i], 8) & ~0o11777:
                    raise ValueError('Not an octet')
            except (ValueError, TypeError):
                verrors.add(f'smb_update.{i}', 'Not a valid mask')

        if verrors:
            raise verrors

        # TODO: consider using bidict
        for k, v in LOGLEVEL_MAP.items():
            if new['loglevel'] == v:
                new['loglevel'] = k
                break

        await self._update_service(old, new)

        return await self.config()
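
The `filemask`/`dirmask` check in do_update rejects values that are not valid octal strings or that set bits outside 0o11777. A standalone sketch of the same logic, for illustration:

# Mirrors the filemask/dirmask validation used in SMBService.do_update above.
def valid_mask(value: str) -> bool:
    try:
        # Any bits outside 0o11777 make the mask invalid.
        if int(value, 8) & ~0o11777:
            raise ValueError('Not an octet')
    except (ValueError, TypeError):
        return False
    return True

assert valid_mask('0777') and valid_mask('0644') and valid_mask('1777')
assert not valid_mask('999') and not valid_mask('rwxr-xr-x')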
Example #24
File: idmap.py Project: mike0615/freenas
class IdmapLDAPService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_ldap'
        datastore_prefix = 'idmap_ldap_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.ldap'

    @accepts(
        Dict('idmap_ldap_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('base_dn'),
             Str('user_dn'),
             Str('url'),
             Str('ssl', default='OFF', enum=['OFF', 'ON', 'START_TLS']),
             Int('certificate'),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap backend table.
        """
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_ldap_create', await
            self.middleware.call('idmap._common_validate', data))
        if verrors:
            raise verrors

        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_ldap_create", "idmap_ldap_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_ldap_update', await
            self.middleware.call('idmap._common_validate', new))

        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete idmap to backend mapping by id
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
Example #25
class DatastoreService(Service):
    def _filters_to_queryset(self, filters, field_suffix=None):
        opmap = {
            '=': 'exact',
            '!=': 'exact',
            '>': 'gt',
            '>=': 'gte',
            '<': 'lt',
            '<=': 'lte',
            '~': 'regex',
        }

        rv = []
        for f in filters:
            if not isinstance(f, (list, tuple)):
                raise ValueError('Filter must be a list: {0}'.format(f))
            if len(f) == 3:
                name, op, value = f
                if field_suffix:
                    name = field_suffix + name
                if op not in opmap:
                    raise Exception("Invalid operation: {0}".format(op))
                q = Q(**{'{0}__{1}'.format(name, opmap[op]): value})
                if op == '!=':
                    q.negate()
                rv.append(q)
            elif len(f) == 2:
                op, value = f
                if op == 'OR':
                    or_value = None
                    for value in self._filters_to_queryset(
                            value, field_suffix=field_suffix):
                        if or_value is None:
                            or_value = value
                        else:
                            or_value |= value
                    rv.append(or_value)
                else:
                    raise ValueError('Invalid operation: {0}'.format(op))
            else:
                raise Exception("Invalid filter {0}".format(f))
        return rv

    def __get_model(self, name):
        """Helper method to get Model for given name
        e.g. network.interfaces -> Interfaces
        """
        app, model = name.split('.', 1)
        return apps.get_model(app, model)

    def __queryset_serialize(self, qs, extend=None, field_suffix=None):
        for i in qs:
            yield django_modelobj_serialize(self.middleware,
                                            i,
                                            extend=extend,
                                            field_suffix=field_suffix)

    @accepts(
        Str('name'),
        List('query-filters', register=True),
        Dict(
            'query-options',
            Str('extend'),
            Dict('extra', additional_attrs=True),
            List('order_by'),
            Bool('count'),
            Bool('get'),
            Str('suffix'),
            register=True,
        ),
    )
    def query(self, name, filters=None, options=None):
        """Query for items in a given collection `name`.

        `filters` is a list in which each entry can be in one of the following formats:

            entry: simple_filter | conjunction
            simple_filter: '[' attribute_name, OPERATOR, value ']'
            conjunction: '[' CONJUNCTION, '[' simple_filter (',' simple_filter)* ']]'

            OPERATOR: ('=' | '!=' | '>' | '>=' | '<' | '<=' | '~' )
            CONJUNCTION: 'OR'

        e.g.

        `['OR', [ ['username', '=', 'root' ], ['uid', '=', 0] ] ]`

        `[ ['username', '=', 'root' ] ]`

        .. examples(websocket)::

          Querying for username "root" and returning a single item:

            :::javascript
            {
              "id": "d51da71b-bb48-4b8b-a8f7-6046fcc892b4",
              "msg": "method",
              "method": "datastore.query",
              "params": ["account.bsdusers", [ ["username", "=", "root" ] ], {"get": true}]
            }
        """
        model = self.__get_model(name)
        if options is None:
            options = {}
        else:
            # We do not want to make changes to original options
            # which might happen with "suffix"
            options = options.copy()

        qs = model.objects.all()

        extra = options.get('extra')
        if extra:
            qs = qs.extra(**extra)

        suffix = options.get('suffix')

        if filters:
            qs = qs.filter(*self._filters_to_queryset(filters, suffix))

        order_by = options.get('order_by')
        if order_by:
            if suffix:
                # Do not change original order_by
                order_by = order_by[:]
                for i, order in enumerate(order_by):
                    if order.startswith('-'):
                        order_by[i] = '-' + suffix + order[1:]
                    else:
                        order_by[i] = suffix + order
            qs = qs.order_by(*order_by)

        if options.get('count') is True:
            return qs.count()

        result = list(
            self.__queryset_serialize(qs,
                                      extend=options.get('extend'),
                                      field_suffix=options.get('suffix')))

        if options.get('get') is True:
            return result[0]
        return result

    @accepts(Str('name'), Ref('query-options'))
    def config(self, name, options=None):
        """
        Get configuration settings object for a given `name`.

        This is a shortcut for `query(name, {"get": true})`.
        """
        if options is None:
            options = {}
        options['get'] = True
        return self.query(name, None, options)

    @accepts(Str('name'), Dict('data', additional_attrs=True))
    def insert(self, name, data):
        """
        Insert a new entry to `name`.
        """
        model = self.__get_model(name)
        for field in model._meta.fields:
            if field.name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[field.name] = field.rel.to.objects.get(
                    pk=data[field.name])
        obj = model(**data)
        obj.save()
        return obj.pk

    @accepts(Str('name'), Int('id'), Dict('data', additional_attrs=True))
    def update(self, name, id, data):
        """
        Update an entry `id` in `name`.
        """
        model = self.__get_model(name)
        obj = model.objects.get(pk=id)
        for field in model._meta.fields:
            if field.name not in data:
                continue
            if isinstance(field, ForeignKey):
                data[field.name] = field.rel.to.objects.get(
                    pk=data[field.name])
        for k, v in list(data.items()):
            setattr(obj, k, v)
        obj.save()
        return obj.pk

    @accepts(Str('name'), Int('id'))
    def delete(self, name, id):
        """
        Delete an entry `id` in `name`.
        """
        model = self.__get_model(name)
        model.objects.get(pk=id).delete()
        return True

    @private
    def sql(self, query, params=None):
        cursor = connection.cursor()
        rv = None
        try:
            if params is None:
                cursor.executelocal(query)
            else:
                cursor.executelocal(query, params)
            rv = cursor.fetchall()
        finally:
            cursor.close()
        return rv
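
A sketch of the filter syntax in practice, assuming a running middleware and the `middlewared.client` client; both filter expressions mirror the examples in the query docstring:

from middlewared.client import Client

with Client() as c:
    # Single item lookup, equivalent to the websocket example in the docstring.
    root = c.call('datastore.query', 'account.bsdusers',
                  [['username', '=', 'root']], {'get': True})

    # OR conjunction from the docstring: users named "root" or with uid 0.
    admins = c.call('datastore.query', 'account.bsdusers',
                    [['OR', [['username', '=', 'root'], ['uid', '=', 0]]]])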
Example #26
File: idmap.py Project: mike0615/freenas
class IdmapRFC2307Service(CRUDService):
    """
    In the rfc2307 backend, the range acts as a filter. Anything falling outside of it is ignored.
    If no user_dn is specified, then an anonymous bind is performed.
    ldap_url is only required when using a stand-alone server.
    """
    class Config:
        datastore = 'directoryservice.idmap_rfc2307'
        datastore_prefix = 'idmap_rfc2307_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.rfc2307'

    @accepts(
        Dict('idmap_rfc2307_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('ldap_server', default='AD', enum=['AD', 'STAND-ALONE']),
             Str('bind_path_user'),
             Str('bind_path_group'),
             Bool('user_cn', default=False),
             Bool('cn_realm', default=False),
             Str('ldap_domain'),
             Str('ldap_url'),
             Str('ldap_user_dn'),
             Str('ldap_user_dn_password'),
             Str('ldap_realm'),
             Str('ssl', default='OFF', enum=['OFF', 'ON', 'START_TLS']),
             Int('certificate'),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap_rfc2307 backend table.

        `ldap_server` defines the type of LDAP server to use. This can either be an LDAP server provided
        by the Active Directory Domain (ad) or a stand-alone LDAP server.

        `bind_path_user` specifies the search base where user objects can be found in the LDAP server.

        `bind_path_group` specifies the search base where group objects can be found in the LDAP server.

        `user_cn` query cn attribute instead of uid attribute for the user name in LDAP.

        `cn_realm` append @realm to cn for groups (and users if user_cn is set) in LDAP queries.

        `ldap_domain` when using the LDAP server in the Active Directory server, this allows one to
        specify the domain in which to access the Active Directory server. This allows using trust relationships
        while keeping all RFC 2307 records in one place. This parameter is optional; the default is to access
        the AD server in the current domain to query LDAP records.

        `ldap_url` when using a stand-alone LDAP server, this parameter specifies the LDAP URL for accessing the LDAP server.

        `ldap_user_dn` defines the user DN to be used for authentication.

        `ldap_realm` defines the realm to use in the user and group names. This is only required when using `cn_realm`
        together with a stand-alone LDAP server.
        """
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_rfc2307_create', await
            self.middleware.call('idmap._common_validate', data))
        if verrors:
            raise verrors

        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_nss_create", "idmap_nss_update", ("attr", {
                 "update": True
             })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_rfc2307_update', await
            self.middleware.call('idmap._common_validate', new))

        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete idmap to backend mapping by id
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
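
A sketch of configuring the rfc2307 backend against a stand-alone LDAP server (all connection details below are placeholders):

from middlewared.client import Client

with Client() as c:
    c.call('idmap.rfc2307.create', {
        'domain': {'idmap_domain_name': 'EXAMPLE'},        # placeholder domain
        'range_low': 10000,
        'range_high': 90000000,
        'ldap_server': 'STAND-ALONE',
        'ldap_url': 'ldap://ldap.example.internal',        # placeholder URL
        'bind_path_user': 'ou=People,dc=example,dc=internal',
        'bind_path_group': 'ou=Groups,dc=example,dc=internal',
        'ldap_user_dn': 'cn=idmap,dc=example,dc=internal',
        'ssl': 'START_TLS',
    })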
Example #27
class ChartReleaseService(Service):

    class Config:
        namespace = 'chart.release'

    @private
    async def scale_down_workloads_before_snapshot(self, job, release):
        resources = []
        pod_mapping = await self.middleware.call('chart.release.get_workload_to_pod_mapping', release['namespace'])
        pods_to_watch_for = []
        for resource in SCALEABLE_RESOURCES:
            for workload in await self.middleware.call(
                f'k8s.{resource.name.lower()}.query', [
                    [f'metadata.annotations.{SCALE_DOWN_ANNOTATION["key"]}', 'in',
                     SCALE_DOWN_ANNOTATION['value']],
                    ['metadata.namespace', '=', release['namespace']],
                ]
            ):
                resources.append({
                    'replica_count': 0,
                    'type': resource.name,
                    'name': workload['metadata']['name'],
                })
                pods_to_watch_for.extend(pod_mapping[workload['metadata']['uid']])

        if not resources:
            return

        job.set_progress(35, f'Scaling down {", ".join([r["name"] for r in resources])} workload(s)')
        await self.middleware.call('chart.release.scale_workloads', release['id'], resources)
        await self.middleware.call(
            'chart.release.wait_for_pods_to_terminate', release['namespace'], [
                ['metadata.name', 'in', pods_to_watch_for],
            ]
        )
        job.set_progress(40, 'Successfully scaled down workload(s)')

    @accepts(
        Str('release_name'),
        Dict(
            'upgrade_options',
            Dict('values', additional_attrs=True),
            Str('item_version', default='latest'),
        )
    )
    @returns(Ref('chart_release_entry'))
    @job(lock=lambda args: f'chart_release_upgrade_{args[0]}')
    async def upgrade(self, job, release_name, options):
        """
        Upgrade `release_name` chart release.

        `upgrade_options.item_version` specifies the item version to which the chart release should be upgraded.

        The system will update container images being used by the `release_name` chart release, as a chart release
        upgrade is not considered complete until the images in use have also been updated to their latest versions.

        During upgrade, `upgrade_options.values` can be specified to apply configuration changes to the chart
        release in question.

        When chart version is upgraded, system will automatically take a snapshot of `ix_volumes` in question
        which can be used to rollback later on.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.get_instance', release_name)
        if not release['update_available'] and not release['container_images_update_available']:
            raise CallError('No update is available for chart release')

        # We need to update container images before upgrading chart version as it's possible that the chart version
        # in question needs newer image hashes.
        job.set_progress(10, 'Updating container images')
        await (
            await self.middleware.call('chart.release.pull_container_images', release_name, {'redeploy': False})
        ).wait(raise_error=True)
        job.set_progress(30, 'Updated container images')

        await self.scale_down_workloads_before_snapshot(job, release)

        # If a snapshot of the volumes already exists with the same name (e.g. from a failed upgrade), we will remove
        # it, as we want the current point in time to be reflected in the snapshot
        # TODO: Remove volumes/ix_volumes check in next release as we are going to do a recursive snapshot
        #  from parent volumes ds moving on
        for filesystem in ('volumes', 'volumes/ix_volumes'):
            volumes_ds = os.path.join(release['dataset'], filesystem)
            snap_name = f'{volumes_ds}@{release["version"]}'
            if await self.middleware.call('zfs.snapshot.query', [['id', '=', snap_name]]):
                await self.middleware.call('zfs.snapshot.delete', snap_name, {'recursive': True})

        await self.middleware.call(
            'zfs.snapshot.create', {
                'dataset': os.path.join(release['dataset'], 'volumes'), 'name': release['version'], 'recursive': True
            }
        )
        job.set_progress(50, 'Created snapshot for upgrade')

        if release['update_available']:
            await self.upgrade_chart_release(job, release, options)
        else:
            await (await self.middleware.call('chart.release.redeploy', release_name)).wait(raise_error=True)

        chart_release = await self.middleware.call('chart.release.get_instance', release_name)
        self.middleware.send_event('chart.release.query', 'CHANGED', id=release_name, fields=chart_release)

        await self.chart_releases_update_checks_internal([['id', '=', release_name]])

        job.set_progress(100, 'Upgrade complete for chart release')

        return chart_release

    @accepts(
        Str('release_name'),
        Dict(
            'options',
            Str('item_version', default='latest', empty=False)
        )
    )
    @returns(Dict(
        Bool('image_update_available', required=True),
        Bool('item_update_available', required=True),
        Dict(
            'container_images_to_update', additional_attrs=True,
            description='Dictionary of container image(s) which have an update available against the same tag',
        ),
        Str('latest_version'),
        Str('latest_human_version'),
        Str('upgrade_version'),
        Str('upgrade_human_version'),
        Str('changelog', max_length=None, null=True),
        List('available_versions_for_upgrade', items=[Dict(
            'version_info',
            Str('version', required=True),
            Str('human_version', required=True),
        )])
    ))
    async def upgrade_summary(self, release_name, options):
        """
        Retrieve upgrade summary for `release_name` which will include which container images will be updated
        and changelog for `options.item_version` chart version specified if applicable. If only container images
        need to be updated, changelog will be `null`.

        If chart release `release_name` does not require an upgrade, an error will be raised.
        """
        release = await self.middleware.call(
            'chart.release.query', [['id', '=', release_name]], {'extra': {'retrieve_resources': True}, 'get': True}
        )
        if not release['update_available'] and not release['container_images_update_available']:
            raise CallError('No update is available for chart release', errno=errno.ENOENT)

        version_info = {
            'latest_version': release['chart_metadata']['version'],
            'upgrade_version': release['chart_metadata']['version'],
            'latest_human_version': release['human_version'],
            'upgrade_human_version': release['human_version'],
        }
        changelog = None
        all_newer_versions = []
        if release['update_available']:
            available_items = await self.get_versions(release, options)
            latest_item = available_items['latest_version']
            upgrade_version = available_items['specified_version']
            version_info.update({
                'latest_version': latest_item['version'],
                'latest_human_version': latest_item['human_version'],
                'upgrade_version': upgrade_version['version'],
                'upgrade_human_version': upgrade_version['human_version'],
            })
            changelog = upgrade_version['changelog']
            all_newer_versions = [
                {
                    'version': v['version'],
                    'human_version': v['human_version'],
                } for v in available_items['versions'].values()
                if parse_version(v['version']) > parse_version(release['chart_metadata']['version'])
            ]

        return {
            'container_images_to_update': {
                k: v for k, v in release['resources']['container_images'].items() if v['update_available']
            },
            'changelog': changelog,
            'available_versions_for_upgrade': all_newer_versions,
            'item_update_available': release['update_available'],
            'image_update_available': release['container_images_update_available'],
            **version_info,
        }

    @private
    async def get_version(self, release, options):
        return (await self.get_versions(release, options))['specified_version']

    @private
    async def get_versions(self, release, options):
        current_chart = release['chart_metadata']
        chart = current_chart['name']
        item_details = await self.middleware.call('catalog.get_item_details', chart, {
            'catalog': release['catalog'],
            'train': release['catalog_train'],
        })

        new_version = options['item_version']
        if new_version == 'latest':
            new_version = await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions', item_details['versions']
            )

        if new_version not in item_details['versions']:
            raise CallError(f'Unable to locate specified {new_version!r} item version.')

        verrors = ValidationErrors()
        if parse_version(new_version) <= parse_version(current_chart['version']):
            verrors.add(
                'upgrade_options.item_version',
                f'Upgrade version must be greater than {current_chart["version"]!r} current version.'
            )

        verrors.check()

        return {
            'specified_version': item_details['versions'][new_version],
            'versions': item_details['versions'],
            'latest_version': item_details['versions'][await self.middleware.call(
                'chart.release.get_latest_version_from_item_versions', item_details['versions']
            )]
        }

    @private
    async def upgrade_chart_release(self, job, release, options):
        release_orig = copy.deepcopy(release)
        release_name = release['name']

        catalog_item = await self.get_version(release, options)
        await self.middleware.call('catalog.version_supported_error_check', catalog_item)

        config = await self.middleware.call('chart.release.upgrade_values', release, catalog_item['location'])
        release_orig['config'] = config

        # We will be performing validation for the values specified. We want to allow the user to specify values here
        # because the upgraded catalog item version might have a different schema, which potentially means that the
        # upgrade won't work, or that even if new k8s resources are created/deployed, they won't necessarily function
        # as they should because of changed or newly expected params.
        # One tricky bit which we need to account for first is removing any key from current configured values
        # which the upgraded release will potentially not support. So we can safely remove those as otherwise
        # validation will fail as new schema does not expect those keys.
        config = clean_values_for_upgrade(config, catalog_item['schema']['questions'])
        config.update(options['values'])

        config, context = await self.middleware.call(
            'chart.release.normalise_and_validate_values', catalog_item, config, False, release['dataset'],
            release_orig,
        )
        job.set_progress(50, 'Initial validation complete for upgrading chart version')

        # We have validated configuration now

        chart_path = os.path.join(release['path'], 'charts', catalog_item['version'])
        await self.middleware.run_in_thread(shutil.rmtree, chart_path, ignore_errors=True)
        await self.middleware.run_in_thread(shutil.copytree, catalog_item['location'], chart_path)

        await self.middleware.call('chart.release.perform_actions', context)

        # Let's update context options to reflect that an upgrade is taking place and from which version to which
        # version it's happening.
        # Helm considers simple config change as an upgrade as well, and we have no way of determining the old/new
        # chart versions during helm upgrade in the helm template, hence the requirement for a context object.
        config[CONTEXT_KEY_NAME].update({
            **get_action_context(release_name),
            'operation': 'UPGRADE',
            'isUpgrade': True,
            'upgradeMetadata': {
                'oldChartVersion': release['chart_metadata']['version'],
                'newChartVersion': catalog_item['version'],
                'preUpgradeRevision': release['version'],
            }
        })

        job.set_progress(60, 'Upgrading chart release version')

        await self.middleware.call('chart.release.helm_action', release_name, chart_path, config, 'upgrade')
        await self.middleware.call('chart.release.refresh_events_state', release_name)

    @private
    def upgrade_values(self, release, new_version_path):
        config = copy.deepcopy(release['config'])
        chart_version = release['chart_metadata']['version']
        migration_path = os.path.join(new_version_path, 'migrations')
        migration_files = [os.path.join(migration_path, k) for k in (f'migrate_from_{chart_version}', 'migrate')]
        if not os.path.exists(migration_path) or all(not os.access(p, os.X_OK) for p in migration_files):
            return config

        # This is guaranteed to exist based on above check
        file_path = next(f for f in migration_files if os.access(f, os.X_OK))

        with tempfile.NamedTemporaryFile(mode='w+') as f:
            f.write(json.dumps(config))
            f.flush()
            cp = subprocess.Popen([file_path, f.name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = cp.communicate()

        if cp.returncode:
            raise CallError(f'Failed to apply migration: {stderr.decode()}')

        if stdout:
            # We add this as a safety net in case something went wrong with the migration and we get a null response
            # or the chart dev mishandled something - although we don't suppress any exceptions which might be raised
            config = json.loads(stdout.decode())

        return config

    @periodic(interval=86400)
    @private
    async def periodic_chart_releases_update_checks(self):
        sync_job = await self.middleware.call('catalog.sync_all')
        await sync_job.wait()
        if not await self.middleware.call('service.started', 'kubernetes'):
            return

        await self.chart_releases_update_checks_internal()

    @private
    async def chart_releases_update_checks_internal(self, chart_releases_filters=None):
        chart_releases_filters = chart_releases_filters or []
        # With respect to alerts, a chart release will be considered valid for upgrade/update if either a newer
        # catalog item version is available or any of the images it's using is outdated

        catalog_items = {
            f'{c["id"]}_{train}_{item}': c['trains'][train][item]
            for c in await self.middleware.call('catalog.query', [], {'extra': {'item_details': True}})
            for train in c['trains'] for item in c['trains'][train]
        }
        for application in await self.middleware.call('chart.release.query', chart_releases_filters):
            if application['container_images_update_available']:
                await self.middleware.call('alert.oneshot_create', 'ChartReleaseUpdate', application)
                continue

            app_id = f'{application["catalog"]}_{application["catalog_train"]}_{application["chart_metadata"]["name"]}'
            catalog_item = catalog_items.get(app_id)
            if not catalog_item:
                continue

            await self.chart_release_update_check(catalog_item, application)

        container_config = await self.middleware.call('container.config')
        if container_config['enable_image_updates']:
            asyncio.ensure_future(self.middleware.call('container.image.check_update'))

    @private
    async def chart_release_update_check(self, catalog_item, application):
        latest_version = catalog_item['latest_version']
        if not latest_version:
            return

        if parse_version(latest_version) > parse_version(application['chart_metadata']['version']):
            await self.middleware.call('alert.oneshot_create', 'ChartReleaseUpdate', application)
        else:
            await self.middleware.call('alert.oneshot_delete', 'ChartReleaseUpdate', application['id'])

    @accepts(
        Str('release_name'),
        Dict(
            'pull_container_images_options',
            Bool('redeploy', default=True),
        )
    )
    @returns(Dict(
        'container_images', additional_attrs=True,
        description='Dictionary of container image(s) with container image tag as key and update status as value',
        example={
            'plexinc/pms-docker:1.23.2.4656-85f0adf5b': 'Updated image',
        }
    ))
    @job(lock=lambda args: f'pull_container_images{args[0]}')
    async def pull_container_images(self, job, release_name, options):
        """
        Update container images being used by `release_name` chart release.

        `redeploy` when set will redeploy pods, which will result in the chart release using the newer, updated
        versions of the container images.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        images = [
            {'orig_tag': tag, 'from_image': tag.rsplit(':', 1)[0], 'tag': tag.rsplit(':', 1)[-1]}
            for tag in (await self.middleware.call(
                'chart.release.query', [['id', '=', release_name]],
                {'extra': {'retrieve_resources': True}, 'get': True}
            ))['resources']['container_images']
        ]
        results = {}

        bulk_job = await self.middleware.call(
            'core.bulk', 'container.image.pull', [
                [{'from_image': image['from_image'], 'tag': image['tag']}]
                for image in images
            ]
        )
        await bulk_job.wait()
        if bulk_job.error:
            raise CallError(f'Failed to update container images for {release_name!r} chart release: {bulk_job.error}')

        for tag, status in zip(images, bulk_job.result):
            if status['error']:
                results[tag['orig_tag']] = f'Failed to pull image: {status["error"]}'
            else:
                results[tag['orig_tag']] = 'Updated image'

        if options['redeploy']:
            await job.wrap(await self.middleware.call('chart.release.redeploy', release_name))

        return results

    @private
    async def clear_update_alerts_for_all_chart_releases(self):
        for chart_release in await self.middleware.call('chart.release.query'):
            await self.middleware.call('alert.oneshot_delete', 'ChartReleaseUpdate', chart_release['id'])
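
A rough sketch of how a client might drive the upgrade flow above (middleware client assumed; the release name is a placeholder, and `job=True` is the client's convenience for waiting on job completion):

from middlewared.client import Client

with Client() as c:
    # upgrade_summary raises if nothing needs updating, so reaching the next call means an upgrade is warranted.
    summary = c.call('chart.release.upgrade_summary', 'my-release', {'item_version': 'latest'})
    c.call('chart.release.upgrade', 'my-release',
           {'item_version': summary['upgrade_version']}, job=True)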
Example #28
File: idmap.py Project: mike0615/freenas
class IdmapScriptService(CRUDService):
    class Config:
        datastore = 'directoryservice.idmap_script'
        datastore_prefix = 'idmap_script_'
        datastore_extend = 'idmap.common_backend_extend'
        namespace = 'idmap.script'

    @accepts(
        Dict('idmap_script_create',
             Dict(
                 'domain',
                 Int('id'),
                 Str('idmap_domain_name'),
                 Str('idmap_domain_dns_domain_name'),
             ),
             Int('range_low',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Int('range_high',
                 required=True,
                 validators=[Range(min=1000, max=2147483647)]),
             Str('script'),
             register=True))
    async def do_create(self, data):
        """
        Create an entry in the idmap backend table.
        `script` is the full path to the script or program that generates the mappings.
        """
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_script_create',
            await self.middleware.call('idmap._common_validate', data))
        if verrors:
            raise verrors

        data = await self.middleware.call('idmap.common_backend_compress',
                                          data)
        data["id"] = await self.middleware.call(
            "datastore.insert",
            self._config.datastore,
            data,
            {"prefix": self._config.datastore_prefix},
        )
        return await self._get_instance(data['id'])

    @accepts(Int('id', required=True),
             Patch("idmap_script_create", "idmap_script_update",
                   ("attr", {
                       "update": True
                   })))
    async def do_update(self, id, data):
        """
        Update an entry in the idmap backend table by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)
        verrors = ValidationErrors()
        verrors.add_child(
            'idmap_script_update',
            await self.middleware.call('idmap._common_validate', new))

        if verrors:
            raise verrors

        new = await self.middleware.call('idmap.common_backend_compress', new)
        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})
        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete an idmap-to-backend mapping by id.
        """
        await self.middleware.call("datastore.delete", self._config.datastore,
                                   id)
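A hedged usage sketch for the service above; the domain id, id ranges, and script path are made-up values for illustration only:

from middlewared.client import Client

# Create a script-based idmap entry. The backend invokes the given script
# to generate uid/gid mappings for the chosen domain.
with Client() as c:
    entry = c.call('idmap.script.create', {
        'domain': {'id': 1},
        'range_low': 90000001,
        'range_high': 100000000,
        'script': '/usr/local/bin/custom_idmap.sh',
    })
    print(entry['id'])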
Example #29
class AFPService(SystemServiceService):
    class Config:
        service = 'afp'
        datastore_extend = 'afp.extend'
        datastore_prefix = 'afp_srv_'

    @private
    async def extend(self, afp):
        for i in ('map_acls', 'chmod_request'):
            afp[i] = afp[i].upper()
        return afp

    @private
    async def compress(self, afp):
        for i in ('map_acls', 'chmod_request'):
            value = afp.get(i)
            if value:
                afp[i] = value.lower()
        return afp

    @accepts(
        Dict('afp_update',
             Bool('guest'),
             Str('guest_user'),
             List('bindip', items=[Str('ip', validators=[IpAddress()])]),
             Int('connections_limit', validators=[Range(min=1, max=65535)]),
             Dir('dbpath'),
             Str('global_aux', max_length=None),
             Str('map_acls', enum=['RIGHTS', 'MODE', 'NONE']),
             Str('chmod_request', enum=['PRESERVE', 'SIMPLE', 'IGNORE']),
             Str('loglevel', enum=[x.name for x in AFPLogLevel]),
             update=True))
    async def do_update(self, data):
        """
        Update AFP service settings.

        `bindip` is a list of IPs to bind AFP to. Leave blank (empty list) to bind to all
        available IPs.

        `map_acls` defines how to map the effective permissions of authenticated users.
        RIGHTS - Map to Darwin-style ACL rights
        MODE - Map to Unix-style permissions
        NONE - Do not map

        `chmod_request` defines advanced permission control that deals with ACLs.
        PRESERVE - Preserve ZFS ACEs for named users and groups or POSIX ACL group mask
        SIMPLE - Change permission as requested without any extra steps
        IGNORE - Permission change requests are ignored
        """
        old = await self.config()

        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new['dbpath']:
            await check_path_resides_within_volume(
                verrors,
                self.middleware,
                'afp_update.dbpath',
                new['dbpath'],
            )

        verrors.check()

        new = await self.compress(new)
        await self._update_service(old, new)

        return await self.config()

    @accepts()
    async def bindip_choices(self):
        """
        List of valid choices for IP addresses to which to bind the AFP service.
        """
        return {
            d['address']: d['address']
            for d in await self.middleware.call('interface.ip_in_use')
        }
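A minimal sketch of how the two public methods above might be combined; the chosen bind address is simply whatever bindip_choices returns first and is purely illustrative:

from middlewared.client import Client

with Client() as c:
    # bindip_choices returns a {address: address} mapping of usable IPs.
    choices = c.call('afp.bindip_choices')
    c.call('afp.update', {
        'guest': False,
        'bindip': list(choices)[:1],  # bind AFP to the first available IP
        'map_acls': 'RIGHTS',
        'chmod_request': 'PRESERVE',
    })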
Example #30
class NISService(ConfigService):
    class Config:
        service = "nis"
        datastore = 'directoryservice.nis'
        datastore_extend = "nis.nis_extend"
        datastore_prefix = "nis_"

    @private
    async def nis_extend(self, nis):
        nis['servers'] = nis['servers'].split(',') if nis['servers'] else []
        return nis

    @private
    async def nis_compress(self, nis):
        nis['servers'] = ','.join(nis['servers'])
        return nis

    @accepts(
        Dict('nis_update',
             Str('domain'),
             List('servers'),
             Bool('secure_mode'),
             Bool('manycast'),
             Bool('enable'),
             update=True))
    async def do_update(self, data):
        """
        Update NIS Service Configuration.

        `domain` is the name of NIS domain.

        `servers` is a list of hostnames/IP addresses.

        `secure_mode` when enabled sets ypbind(8) to refuse to bind to any NIS server that is not
        running as root (i.e. one answering on a non-privileged TCP port above 1024).

        `manycast` when enabled sets ypbind(8) to bind to the server that responds the fastest.

        `enable` when true enables the NIS service.
        """
        must_reload = False
        old = await self.config()
        new = old.copy()
        new.update(data)
        if old != new:
            must_reload = True
        await self.nis_compress(new)
        await self.middleware.call('datastore.update', 'directoryservice.nis',
                                   old['id'], new, {'prefix': 'nis_'})

        if must_reload:
            if new['enable']:
                await self.middleware.call('nis.start')
            else:
                await self.middleware.call('nis.stop')

        return await self.config()

    @private
    async def set_state(self, state):
        return await self.middleware.call('directoryservices.set_state',
                                          {'nis': state.name})

    @accepts()
    async def get_state(self):
        """
        Wrapper function for 'directoryservices.get_state'. Returns only the state of the
        NIS service.
        """
        return (await self.middleware.call('directoryservices.get_state'))['nis']

    @private
    async def start(self):
        """
        Refuse to start the service if it is already in the process of starting or stopping.
        If state is 'HEALTHY' or 'FAULTED', then stop the service first before restarting it to ensure
        that the service begins in a clean state.
        """
        state = await self.get_state()
        nis = await self.config()
        if state in ['FAULTED', 'HEALTHY']:
            await self.stop()

        if state in ['EXITING', 'JOINING']:
            raise CallError(
                f'Current state of NIS service is: [{state}]. Wait until operation completes.',
                errno.EBUSY)

        await self.set_state(DSStatus['JOINING'])
        await self.middleware.call('datastore.update', 'directoryservice.nis',
                                   nis['id'], {'nis_enable': True})
        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'pam')
        await self.middleware.call('etc.generate', 'hostname')
        await self.middleware.call('etc.generate', 'nss')
        setnisdomain = await run(['/bin/domainname', nis['domain']],
                                 check=False)
        if setnisdomain.returncode != 0:
            await self.set_state(DSStatus['FAULTED'])
            raise CallError(
                f'Failed to set NIS Domain to [{nis["domain"]}]: {setnisdomain.stderr.decode()}'
            )

        ypbind = await run(['/usr/sbin/service', 'ypbind', 'onestart'],
                           check=False)
        if ypbind.returncode != 0:
            await self.set_state(DSStatus['FAULTED'])
            raise CallError(f'ypbind failed: {ypbind.stderr.decode()}')

        await self.set_state(DSStatus['HEALTHY'])
        self.logger.debug(
            'NIS service successfully started. Setting state to HEALTHY.')
        await self.middleware.call('nis.fill_cache')
        return True

    @private
    async def __ypwhich(self):
        """
        The return code from ypwhich is not a reliable health indicator. For example, RPC failure will return 0.
        There are edge cases where ypwhich can hang when NIS is misconfigured.
        """
        ypwhich = await run(['/usr/bin/ypwhich'], check=False)

        if ypwhich.stderr:
            raise CallError(
                f'NIS status check returned [{ypwhich.stderr.decode().strip()}]. Setting state to FAULTED.'
            )
        return True

    @private
    async def started(self):
        ret = False
        if not (await self.config())['enable']:
            return ret
        try:
            ret = await asyncio.wait_for(self.__ypwhich(), timeout=5.0)
        except asyncio.TimeoutError:
            raise CallError('nis.started check timed out after 5 seconds.')

        return ret

    @private
    async def stop(self, force=False):
        """
        Remove NIS_state entry entirely after stopping ypbind. This is so that the 'enable' checkbox
        becomes the sole source of truth regarding a service's state when it is disabled.
        """
        state = await self.get_state()
        nis = await self.config()
        if not force:
            if state in ['LEAVING', 'JOINING']:
                raise CallError(
                    f'Current state of NIS service is: [{state}]. Wait until operation completes.',
                    errno.EBUSY)

        await self.set_state(DSStatus['LEAVING'])
        await self.middleware.call('datastore.update', 'directoryservice.nis',
                                   nis['id'], {'nis_enable': False})

        ypbind = await run(['/usr/sbin/service', 'ypbind', 'onestop'],
                           check=False)
        if ypbind.returncode != 0:
            await self.set_state(DSStatus['FAULTED'])
            errmsg = ypbind.stderr.decode().strip()
            if 'ypbind not running' not in errmsg:
                raise CallError(f'ypbind failed to stop: [{errmsg}]')

        await self.middleware.call('cache.pop', 'NIS_State')
        await self.middleware.call('etc.generate', 'rc')
        await self.middleware.call('etc.generate', 'pam')
        await self.middleware.call('etc.generate', 'hostname')
        await self.middleware.call('etc.generate', 'nss')
        await self.set_state(DSStatus['DISABLED'])
        self.logger.debug(
            'NIS service successfully stopped. Setting state to DISABLED.')
        return True

    @private
    @job(lock=lambda args: 'fill_nis_cache')
    def fill_cache(self, job, force=False):
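        # Cached NIS users/groups are assigned synthetic ids starting at a
        # high base, presumably so they can never collide with the ids of the
        # local accounts queried below.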
        user_next_index = group_next_index = 200000000
        if self.middleware.call_sync('cache.has_key',
                                     'NIS_cache') and not force:
            raise CallError(
                'NIS cache already exists. Refusing to generate cache.')

        self.middleware.call_sync('cache.pop', 'NIS_cache')
        pwd_list = pwd.getpwall()
        grp_list = grp.getgrall()

        local_uid_list = list(u['uid']
                              for u in self.middleware.call_sync('user.query'))
        local_gid_list = list(
            g['gid'] for g in self.middleware.call_sync('group.query'))
        cache_data = {'users': [], 'groups': []}

        for u in pwd_list:
            if u.pw_uid in local_uid_list:
                continue

            cache_data['users'].append({
                u.pw_name: {
                    'id': user_next_index,
                    'uid': u.pw_uid,
                    'username': u.pw_name,
                    'unixhash': None,
                    'smbhash': None,
                    'group': {},
                    'home': '',
                    'shell': '',
                    'full_name': u.pw_gecos,
                    'builtin': False,
                    'email': '',
                    'password_disabled': False,
                    'locked': False,
                    'sudo': False,
                    'microsoft_account': False,
                    'attributes': {},
                    'groups': [],
                    'sshpubkey': None,
                    'local': False
                }
            })
            user_next_index += 1

        for g in grp_list:
            if g.gr_gid in local_gid_list:
                continue

            cache_data['groups'].append({
                g.gr_name: {
                    'id': group_next_index,
                    'gid': g.gr_gid,
                    'group': g.gr_name,
                    'builtin': False,
                    'sudo': False,
                    'users': [],
                    'local': False
                }
            })
            group_next_index += 1

        self.middleware.call_sync('cache.put', 'NIS_cache', cache_data)
        self.middleware.call_sync('dscache.backup')

    @private
    async def get_cache(self):
        if not await self.middleware.call('cache.has_key', 'NIS_cache'):
            await self.middleware.call('nis.fill_cache')
            self.logger.debug('cache fill is in progress.')
            return {'users': [], 'groups': []}
        return await self.middleware.call('cache.get', 'NIS_cache')
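A hedged usage sketch for the NIS service above; the domain and server names are placeholders, not values from the example:

from middlewared.client import Client

with Client() as c:
    # Configure and enable NIS; nis.update triggers nis.start when the
    # configuration changes and 'enable' is true.
    c.call('nis.update', {
        'domain': 'example-nis-domain',
        'servers': ['nis1.example.com', 'nis2.example.com'],
        'secure_mode': True,
        'enable': True,
    })
    print(c.call('nis.get_state'))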