Example #1
class SystemService(Service):
    CPU_INFO = {
        'cpu_model': None,
        'core_count': None,
        'physical_core_count': None,
    }

    MEM_INFO = {
        'physmem_size': None,
    }

    BIRTHDAY_DATE = {
        'date': None,
    }

    HOST_ID = None

    class Config:
        cli_namespace = 'system'

    @private
    async def birthday(self):

        if self.BIRTHDAY_DATE['date'] is None:
            birth = (await self.middleware.call('datastore.config', 'system.settings'))['stg_birthday']
            if birth != datetime(1970, 1, 1):
                self.BIRTHDAY_DATE['date'] = birth

        return self.BIRTHDAY_DATE

    @private
    async def mem_info(self):

        if self.MEM_INFO['physmem_size'] is None:
            # physmem doesn't change after boot so cache the results
            self.MEM_INFO['physmem_size'] = psutil.virtual_memory().total

        return self.MEM_INFO

    @private
    def get_cpu_model(self):
        with open('/proc/cpuinfo', 'r') as f:
            model = RE_CPU_MODEL.search(f.read())
            return model.group(1) if model else None

    @private
    async def cpu_info(self):
        """
        CPU info doesn't change after boot so cache the results
        """

        if self.CPU_INFO['cpu_model'] is None:
            self.CPU_INFO['cpu_model'] = await self.middleware.call('system.get_cpu_model')

        if self.CPU_INFO['core_count'] is None:
            self.CPU_INFO['core_count'] = psutil.cpu_count(logical=True)

        if self.CPU_INFO['physical_core_count'] is None:
            self.CPU_INFO['physical_core_count'] = psutil.cpu_count(logical=False)

        return self.CPU_INFO

    @private
    async def time_info(self):
        uptime_seconds = time.clock_gettime(time.CLOCK_MONOTONIC_RAW)
        current_time = time.time()

        return {
            'uptime_seconds': uptime_seconds,
            'uptime': str(timedelta(seconds=uptime_seconds)),
            'boot_time': datetime.fromtimestamp((current_time - uptime_seconds), timezone.utc),
            'datetime': datetime.fromtimestamp(current_time, timezone.utc),
        }

    @private
    async def hostname(self):
        return socket.gethostname()

    @accepts()
    @returns(Str('system_host_identifier'))
    def host_id(self):
        """
        Retrieve a hex string that is generated based
        on the contents of the `/etc/hostid` file. This
        is a permanent value that persists across
        reboots/upgrades and can be used as a unique
        identifier for the machine.
        """
        if self.HOST_ID is None:
            with open('/etc/hostid', 'rb') as f:
                id = f.read().strip()
                if id:
                    self.HOST_ID = hashlib.sha256(id).hexdigest()

        return self.HOST_ID

    @no_auth_required
    @throttle(seconds=2, condition=throttle_condition)
    @accepts()
    @returns(Datetime('system_build_time'))
    @pass_app()
    async def build_time(self, app):
        """
        Retrieve build time of the system.
        """
        buildtime = sw_buildtime()
        return datetime.fromtimestamp(int(buildtime)) if buildtime else buildtime

    @accepts()
    @returns(Dict(
        'system_info',
        Str('version', required=True, title='TrueNAS Version'),
        Datetime('buildtime', required=True, title='TrueNAS build time'),
        Str('hostname', required=True, title='System host name'),
        Int('physmem', required=True, title='System physical memory'),
        Str('model', required=True, title='CPU Model'),
        Int('cores', required=True, title='CPU Cores'),
        Int('physical_cores', required=True, title='CPU Physical Cores'),
        List('loadavg', required=True),
        Str('uptime', required=True),
        Float('uptime_seconds', required=True),
        Str('system_serial', required=True, null=True),
        Str('system_product', required=True, null=True),
        Str('system_product_version', required=True, null=True),
        Dict('license', additional_attrs=True, null=True),  # TODO: Fill this in please
        Datetime('boottime', required=True),
        Datetime('datetime', required=True),
        Datetime('birthday', required=True, null=True),
        Str('timezone', required=True),
        Str('system_manufacturer', required=True, null=True),
        Bool('ecc_memory', required=True),
    ))
    async def info(self):
        """
        Returns basic system information.
        """
        time_info = await self.middleware.call('system.time_info')
        dmidecode = await self.middleware.call('system.dmidecode_info')
        cpu_info = await self.middleware.call('system.cpu_info')
        mem_info = await self.middleware.call('system.mem_info')
        birthday = await self.middleware.call('system.birthday')
        timezone_setting = (await self.middleware.call('datastore.config', 'system.settings'))['stg_timezone']

        return {
            'version': await self.middleware.call('system.version'),
            'buildtime': await self.middleware.call('system.build_time'),
            'hostname': await self.middleware.call('system.hostname'),
            'physmem': mem_info['physmem_size'],
            'model': cpu_info['cpu_model'],
            'cores': cpu_info['core_count'],
            'physical_cores': cpu_info['physical_core_count'],
            'loadavg': list(os.getloadavg()),
            'uptime': time_info['uptime'],
            'uptime_seconds': time_info['uptime_seconds'],
            'system_serial': dmidecode['system-serial-number'] if dmidecode['system-serial-number'] else None,
            'system_product': dmidecode['system-product-name'] if dmidecode['system-product-name'] else None,
            'system_product_version': dmidecode['system-version'] if dmidecode['system-version'] else None,
            'license': await self.middleware.call('system.license'),
            'boottime': time_info['boot_time'],
            'datetime': time_info['datetime'],
            'birthday': birthday['date'],
            'timezone': timezone_setting,
            'system_manufacturer': dmidecode['system-manufacturer'] if dmidecode['system-manufacturer'] else None,
            'ecc_memory': dmidecode['ecc-memory'],
        }

    @private
    def get_synced_clock_time(self):
        """
        Return the synced clock time if ntpd has synced with NTP servers,
        otherwise return None.
        """
        client = ntplib.NTPClient()
        try:
            response = client.request('localhost')
        except Exception:
            # Cannot connect to NTP server
            self.logger.error('Error while connecting to NTP server', exc_info=True)
        else:
            if response.version and response.leap != 3:
                # https://github.com/darkhelmet/ntpstat/blob/11f1d49cf4041169e1f741f331f65645b67680d8/ntpstat.c#L172
                # if leap second indicator is 3, it means that the clock has not been synchronized
                return datetime.fromtimestamp(response.tx_time, timezone.utc)
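The system information methods above are exposed through the middleware RPC layer, so they can be exercised from a client in a few lines. A minimal usage sketch, assuming the standard middlewared Python client and a local middleware socket (connection details vary by deployment):

from middlewared.client import Client

# Query the public endpoints defined above (illustrative usage).
with Client() as c:
    info = c.call('system.info')        # aggregated system information
    host_id = c.call('system.host_id')  # sha256 hex digest of /etc/hostid
    print(info['hostname'], info['uptime'], host_id)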
Example #2
from middlewared.schema import Bool, Datetime, Dict, Int, List, OROperator, Str


CERT_ENTRY = Dict(
    'certificate_entry',
    Int('id'),
    Int('type'),
    Str('name'),
    Str('certificate', null=True, max_length=None),
    Str('privatekey', null=True, max_length=None),
    Str('CSR', null=True, max_length=None),
    Str('acme_uri', null=True),
    Dict('domains_authenticators', additional_attrs=True, null=True),
    Int('renew_days'),
    Datetime('revoked_date', null=True),
    Dict('signedby', additional_attrs=True, null=True),
    Str('root_path'),
    Dict('acme', additional_attrs=True, null=True),
    Str('certificate_path', null=True),
    Str('privatekey_path', null=True),
    Str('csr_path', null=True),
    Str('cert_type'),
    Bool('revoked'),
    OROperator(Str('issuer', null=True), Dict('issuer', additional_attrs=True, null=True), name='issuer'),
    List('chain_list', items=[Str('certificate', max_length=None)]),
    Str('country', null=True),
    Str('state', null=True),
    Str('city', null=True),
    Str('organization', null=True),
    Str('organizational_unit', null=True),
)
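Schema objects like CERT_ENTRY are normally consumed by the service layer rather than used directly. A hypothetical sketch of that wiring, with the service name and namespace purely illustrative:

from middlewared.service import CRUDService


class CertificateService(CRUDService):

    class Config:
        namespace = 'certificate'

    # Attaching the schema as the service ENTRY documents and validates what
    # query()/get_instance() return (illustrative wiring, not the real service).
    ENTRY = CERT_ENTRY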
Example #3
class KerberosKeytabService(TDBWrapCRUDService):
    class Config:
        datastore = 'directoryservice.kerberoskeytab'
        datastore_prefix = 'keytab_'
        namespace = 'kerberos.keytab'
        cli_namespace = 'directory_service.kerberos.keytab'

    ENTRY = Patch(
        'kerberos_keytab_create',
        'kerberos_keytab_entry',
        ('add', Int('id')),
    )

    @accepts(
        Dict('kerberos_keytab_create',
             Str('file', max_length=None),
             Str('name'),
             register=True))
    async def do_create(self, data):
        """
        Create a kerberos keytab. Uploaded keytab files will be merged with the system
        keytab under /etc/krb5.keytab.

        `file` b64encoded kerberos keytab
        `name` name for kerberos keytab
        """
        verrors = ValidationErrors()

        verrors.add_child('kerberos_principal_create', await self._validate(data))

        if verrors:
            raise verrors

        id = await super().do_create(data)
        await self.middleware.call('etc.generate', 'kerberos')

        return await self._get_instance(id)

    @accepts(Int('id', required=True),
             Patch(
                 'kerberos_keytab_create',
                 'kerberos_keytab_update',
             ))
    async def do_update(self, id, data):
        """
        Update kerberos keytab by id.
        """
        old = await self._get_instance(id)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        verrors.add_child('kerberos_principal_update', await self._validate(new))

        if verrors:
            raise verrors

        await super().do_update(id, new)
        await self.middleware.call('etc.generate', 'kerberos')

        return await self._get_instance(id)

    @accepts(Int('id'))
    async def do_delete(self, id):
        """
        Delete kerberos keytab by id, and force regeneration of
        system keytab.
        """
        await super().do_delete(id)
        if os.path.exists(keytab['SYSTEM'].value):
            os.remove(keytab['SYSTEM'].value)
        await self.middleware.call('etc.generate', 'kerberos')
        await self._cleanup_kerberos_principals()
        await self.middleware.call('kerberos.stop')
        try:
            await self.middleware.call('kerberos.start')
        except Exception as e:
            self.logger.debug(
                'Failed to start kerberos service after deleting keytab entry: %s', e)

    @accepts(Dict(
        'keytab_data',
        Str('name', required=True),
    ))
    @returns(Ref('kerberos_keytab_entry'))
    @job(lock='upload_keytab', pipes=['input'], check_pipes=True)
    async def upload_keytab(self, job, data):
        """
        Upload a keytab file. This method expects the keytab file to be uploaded using
        the /_upload/ endpoint.
        """
        ktmem = io.BytesIO()
        await self.middleware.run_in_thread(shutil.copyfileobj,
                                            job.pipes.input.r, ktmem)
        b64kt = base64.b64encode(ktmem.getvalue())
        return await self.middleware.call('kerberos.keytab.create', {
            'name': data['name'],
            'file': b64kt.decode()
        })

    @private
    async def legacy_validate(self, keytab):
        err = await self._validate({'file': keytab})
        try:
            err.check()
        except Exception as e:
            raise CallError(e)

    @private
    async def _cleanup_kerberos_principals(self):
        principal_choices = await self.middleware.call(
            'kerberos.keytab.kerberos_principal_choices')
        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        if ad['kerberos_principal'] and ad[
                'kerberos_principal'] not in principal_choices:
            await self.middleware.call('activedirectory.update',
                                       {'kerberos_principal': ''})
        if ldap['kerberos_principal'] and ldap[
                'kerberos_principal'] not in principal_choices:
            await self.middleware.call('ldap.update',
                                       {'kerberos_principal': ''})

    @private
    async def do_ktutil_list(self, data):
        kt = data.get("kt_name", keytab.SYSTEM.value)
        ktutil = await run(["klist", "-tek", kt], check=False)
        if ktutil.returncode != 0:
            raise CallError(ktutil.stderr.decode())
        ret = ktutil.stdout.decode().splitlines()
        if len(ret) < 4:
            return []

        # Skip the three header lines emitted by klist and return the entry lines.
        return '\n'.join(ret[3:])

    @private
    async def _validate(self, data):
        """
        For now validation is limited to checking if we can resolve the hostnames
        configured for the kdc, admin_server, and kpasswd_server can be resolved
        by DNS, and if the realm can be resolved by DNS.
        """
        verrors = ValidationErrors()
        try:
            decoded = base64.b64decode(data['file'])
        except Exception as e:
            verrors.add(
                "kerberos.keytab_create",
                f"Keytab is a not a properly base64-encoded string: [{e}]")
            return verrors

        with open(keytab['TEST'].value, "wb") as f:
            f.write(decoded)

        try:
            await self.do_ktutil_list({"kt_name": keytab['TEST'].value})
        except CallError as e:
            verrors.add("kerberos.keytab_create",
                        f"Failed to validate keytab: [{e.errmsg}]")

        os.unlink(keytab['TEST'].value)

        return verrors

    @private
    async def _ktutil_list(self, keytab_file=keytab['SYSTEM'].value):
        keytab_entries = []
        try:
            kt_list_output = await self.do_ktutil_list(
                {"kt_name": keytab_file})
        except Exception as e:
            self.logger.warning("Failed to list kerberos keytab [%s]: %s",
                                keytab_file, e)
            kt_list_output = None

        if not kt_list_output:
            return keytab_entries

        for idx, line in enumerate(kt_list_output.splitlines()):
            # Each "klist -tek" entry line looks like:
            #   <kvno> <date> <time> <principal> (<etype>)
            fields = line.split()
            keytab_entries.append({
                'slot': idx + 1,
                'kvno': int(fields[0]),
                'principal': fields[3],
                'etype': fields[4][1:-1].strip('DEPRECATED:'),
                'etype_deprecated': fields[4][1:].startswith('DEPRECATED'),
                'date': time.strptime(fields[1], '%m/%d/%y'),
            })

        return keytab_entries

    @accepts()
    @returns(
        List('system-keytab',
             items=[
                 Dict('keytab-entry', Int('slot'), Int('kvno'),
                      Str('principal'), Str('etype'), Bool('etype_deprecated'),
                      Datetime('date'))
             ]))
    async def system_keytab_list(self):
        """
        Returns content of system keytab (/etc/krb5.keytab).
        """
        kt_list = await self._ktutil_list()
        parsed = []
        for entry in kt_list:
            entry['date'] = time.mktime(entry['date'])
            parsed.append(entry)

        return parsed

    @private
    async def _get_nonsamba_principals(self, keytab_list):
        """
        Generate list of Kerberos principals that are not the AD machine account.
        """
        ad = await self.middleware.call('activedirectory.config')
        pruned_list = []
        for i in keytab_list:
            if ad['netbiosname'].casefold() not in i['principal'].casefold():
                pruned_list.append(i)

        return pruned_list

    @private
    async def _generate_tmp_keytab(self):
        """
        Generate a temporary keytab to separate out the machine account keytab principal.
        ktutil copy returns 1 even if copy succeeds.
        """
        with contextlib.suppress(OSError):
            os.remove(keytab['SAMBA'].value)

        kt_copy = await Popen(
            ['ktutil'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE,
        )
        output = await kt_copy.communicate(
            f'rkt {keytab.SYSTEM.value}\nwkt {keytab.SAMBA.value}\nq\n'.encode())
        if output[1]:
            raise CallError(
                f"failed to generate [{keytab['SAMBA'].value}]: {output[1].decode()}"
            )

    @private
    async def _prune_keytab_principals(self, to_delete=None):
        """
        Delete all keytab entries from the tmp keytab that are not samba entries.
        The pruned keytab must be written to a new file to avoid duplication of
        entries.
        """
        to_delete = to_delete or []  # avoid a mutable default argument
        rkt = f"rkt {keytab.SAMBA.value}"
        wkt = "wkt /var/db/system/samba4/samba_mit.keytab"
        delents = "\n".join(f"delent {x['slot']}" for x in reversed(to_delete))
        ktutil_remove = await Popen(
            ['ktutil'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE,
        )
        output = await ktutil_remove.communicate(
            f'{rkt}\n{delents}\n{wkt}\nq\n'.encode())
        if output[1]:
            raise CallError(output[1].decode())

        with contextlib.suppress(OSError):
            os.remove(keytab.SAMBA.value)

        os.rename("/var/db/system/samba4/samba_mit.keytab", keytab.SAMBA.value)

    @private
    async def kerberos_principal_choices(self):
        """
        Keytabs typically have multiple entries for the same principal (differentiated by enc_type).
        Since the enctype isn't relevant in this situation, only show unique principal names.

        Return empty list if system keytab doesn't exist.
        """
        if not os.path.exists(keytab['SYSTEM'].value):
            return []

        try:
            keytab_list = await self._ktutil_list()
        except Exception as e:
            self.logger.trace(
                '"ktutil list" failed. Generating empty list of kerberos principal choices. Error: %s', e)
            return []

        kerberos_principals = []
        for entry in keytab_list:
            if entry['principal'] not in kerberos_principals:
                kerberos_principals.append(entry['principal'])

        return sorted(kerberos_principals)

    @private
    async def has_nfs_principal(self):
        """
        This method checks whether the kerberos keytab contains an NFS service principal.
        """
        principals = await self.kerberos_principal_choices()
        for p in principals:
            if p.startswith("nfs/"):
                return True

        return False

    @private
    async def store_samba_keytab(self):
        """
        Samba will automatically generate system keytab entries for the AD machine account
        (netbios name with '$' appended), and maintain them through machine account password changes.

        Copy the system keytab, parse it, and update the corresponding keytab entry in the freenas configuration
        database.

        The current system kerberos keytab is compared with a cached copy before it is
        overwritten when a new keytab is generated through middleware 'etc.generate kerberos'.
        """
        if not os.path.exists(keytab['SYSTEM'].value):
            return False

        encoded_keytab = None
        keytab_list = await self._ktutil_list()
        items_to_remove = await self._get_nonsamba_principals(keytab_list)
        await self._generate_tmp_keytab()
        await self._prune_keytab_principals(items_to_remove)
        with open(keytab['SAMBA'].value, 'rb') as f:
            encoded_keytab = base64.b64encode(f.read())

        if not encoded_keytab:
            self.logger.debug(
                f"Failed to generate b64encoded version of {keytab['SAMBA'].name}"
            )
            return False

        keytab_file = encoded_keytab.decode()
        entry = await self.query([('name', '=', 'AD_MACHINE_ACCOUNT')])
        if not entry:
            await self.middleware.call('kerberos.keytab.direct_create', {
                'name': 'AD_MACHINE_ACCOUNT',
                'file': keytab_file
            })
        else:
            id = entry[0]['id']
            updated_entry = {'name': 'AD_MACHINE_ACCOUNT', 'file': keytab_file}
            await self.middleware.call('kerberos.keytab.direct_update', id,
                                       updated_entry)

        sambakt = await self.query([('name', '=', 'AD_MACHINE_ACCOUNT')])
        if sambakt:
            return sambakt[0]['id']

    @periodic(3600)
    @private
    async def check_updated_keytab(self):
        """
        Check mtime of current kerberos keytab. If it has changed since last check,
        assume that samba has updated it behind the scenes and that the configuration
        database needs to be updated to reflect the change.
        """
        if not await self.middleware.call('system.ready'):
            return

        old_mtime = 0
        ad_state = await self.middleware.call('activedirectory.get_state')
        if ad_state == 'DISABLED' or not os.path.exists(keytab['SYSTEM'].value):
            return

        if (await self.middleware.call("smb.get_smb_ha_mode")) in ("LEGACY", "CLUSTERED"):
            return

        if await self.middleware.call('cache.has_key', 'KEYTAB_MTIME'):
            old_mtime = await self.middleware.call('cache.get', 'KEYTAB_MTIME')

        new_mtime = (os.stat(keytab['SYSTEM'].value)).st_mtime
        if old_mtime == new_mtime:
            return

        ts = await self.middleware.call(
            'directoryservices.get_last_password_change')
        if ts['dbconfig'] == ts['secrets']:
            return

        self.logger.debug(
            "Machine account password has changed. Stored copies of "
            "kerberos keytab and directory services secrets will now "
            "be updated.")

        await self.middleware.call('directoryservices.backup_secrets')
        await self.store_samba_keytab()
        self.logger.trace('Updating stored AD machine account kerberos keytab')
        await self.middleware.call('cache.put', 'KEYTAB_MTIME',
                                   (os.stat(keytab['SYSTEM'].value)).st_mtime)
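In practice the keytab endpoints above are driven from a client: the keytab file is base64-encoded and handed to kerberos.keytab.create, or streamed to upload_keytab through the upload endpoint. A minimal sketch of the first approach, assuming the standard middlewared Python client (file path and keytab name are illustrative):

import base64

from middlewared.client import Client

# Read a keytab from disk, base64-encode it, and create the entry; the
# middleware then merges it into /etc/krb5.keytab via 'etc.generate kerberos'.
with open('/tmp/service.keytab', 'rb') as f:  # illustrative path
    encoded = base64.b64encode(f.read()).decode()

with Client() as c:
    entry = c.call('kerberos.keytab.create', {'name': 'SERVICE_KEYTAB', 'file': encoded})
    print(entry['id'])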
Example #4
class BootService(Service):

    class Config:
        cli_namespace = 'system.boot'

    @private
    async def pool_name(self):
        return BOOT_POOL_NAME

    @accepts()
    @returns(
        Dict(
            'boot_pool_state',
            Str('name'),
            Str('id'),
            Str('guid'),
            Str('hostname'),
            Str('status'),
            Bool('healthy'),
            Int('error_count'),
            Dict(
                'root_dataset',
                Str('id'),
                Str('name'),
                Str('pool'),
                Str('type'),
                Dict(
                    'properties',
                    additional_attrs=True,
                ),
                Str('mountpoint', null=True),
                Bool('encrypted'),
                Str('encryption_root', null=True),
                Bool('key_loaded'),
            ),
            Dict(
                'properties',
                additional_attrs=True,
            ),
            List('features', items=[Dict(
                'feature_item',
                Str('name'),
                Str('guid'),
                Str('description'),
                Str('state'),
            )]),
            Dict(
                'scan',
                Str('function'),
                Str('state'),
                Datetime('start_time', null=True),
                Datetime('end_time', null=True),
                Float('percentage'),
                Int('bytes_to_process'),
                Int('bytes_processed'),
                Datetime('pause', null=True),
                Int('errors'),
                Int('bytes_issued', null=True),
                Int('total_secs_left', null=True),

            ),
            Dict(
                'root_vdev',
                Str('type'),
                Str('path', null=True),
                Str('guid'),
                Str('status'),
                Dict(
                    'stats',
                    Int('timestamp'),
                    Int('read_errors'),
                    Int('write_errors'),
                    Int('checksum_errors'),
                    List('ops', items=[Int('op')]),
                    List('bytes', items=[Int('byte')]),
                    Int('size'),
                    Int('allocated'),
                    Int('fragmentation'),
                    Int('self_healed'),
                    Int('configured_ashift'),
                    Int('logical_ashift'),
                    Int('physical_ashift'),
                ),
            ),
            Dict(
                'groups',
                additional_attrs=True,
            ),
            Str('status_code'),
            Str('status_detail'),
        ),
    )
    async def get_state(self):
        """
        Returns the current state of the boot pool, including all vdevs, properties and datasets.
        """
        return await self.middleware.call('zfs.pool.query', [('name', '=', BOOT_POOL_NAME)], {'get': True})

    @accepts()
    @returns(List('disks', items=[Str('disk')]))
    async def get_disks(self):
        """
        Returns disks of the boot pool.
        """
        return await self.middleware.call('zfs.pool.get_disks', BOOT_POOL_NAME)

    @private
    async def get_boot_type(self):
        """
        Get the boot type of the boot pool.

        Returns:
            "BIOS" or "EFI"
        """
        # https://wiki.debian.org/UEFI
        return 'EFI' if os.path.exists('/sys/firmware/efi') else 'BIOS'

    @accepts(
        Str('dev'),
        Dict(
            'options',
            Bool('expand', default=False),
        ),
    )
    @returns()
    @job(lock='boot_attach')
    async def attach(self, job, dev, options):
        """
        Attach a disk to the boot pool, turning a stripe into a mirror.

        `expand` option will determine whether the new disk partition will be
                 the maximum available or the same size as the current disk.
        """

        disks = list(await self.get_disks())
        if len(disks) > 1:
            raise CallError('3-way mirror not supported')

        format_opts = {}
        if not options['expand']:
            # Let's try to find out the size of the current freebsd-zfs partition so
            # the new partition is not bigger, preventing a size mismatch if one of
            # them fails later on. See #21336
            zfs_part = await self.middleware.call('disk.get_partition', disks[0], 'ZFS')
            if zfs_part:
                format_opts['size'] = zfs_part['size']

        swap_part = await self.middleware.call('disk.get_partition', disks[0], 'SWAP')
        if swap_part:
            format_opts['swap_size'] = swap_part['size']
        await self.middleware.call('boot.format', dev, format_opts)

        pool = await self.middleware.call('zfs.pool.query', [['name', '=', BOOT_POOL_NAME]], {'get': True})

        zfs_dev_part = await self.middleware.call('disk.get_partition', dev, 'ZFS')
        extend_pool_job = await self.middleware.call(
            'zfs.pool.extend', BOOT_POOL_NAME, None, [{
                'target': pool['groups']['data'][0]['guid'],
                'type': 'DISK',
                'path': f'/dev/{zfs_dev_part["name"]}'
            }]
        )

        await self.middleware.call('boot.install_loader', dev)

        await job.wrap(extend_pool_job)

        # If the user is upgrading their disks, set expand to True to make sure that we
        # register the new disk's capacity, which increases the size of the pool
        await self.middleware.call('zfs.pool.online', BOOT_POOL_NAME, zfs_dev_part['name'], True)

        await self.update_initramfs()

    @accepts(Str('dev'))
    @returns()
    async def detach(self, dev):
        """
        Detach given `dev` from boot pool.
        """
        await self.middleware.call('zfs.pool.detach', BOOT_POOL_NAME, dev, {'clear_label': True})
        await self.update_initramfs()

    @accepts(Str('label'), Str('dev'))
    @returns()
    async def replace(self, label, dev):
        """
        Replace device `label` on boot pool with `dev`.
        """
        format_opts = {}
        disks = list(await self.get_disks())
        swap_part = await self.middleware.call('disk.get_partition', disks[0], 'SWAP')
        if swap_part:
            format_opts['swap_size'] = swap_part['size']

        await self.middleware.call('boot.format', dev, format_opts)
        zfs_dev_part = await self.middleware.call('disk.get_partition', dev, 'ZFS')
        await self.middleware.call('zfs.pool.replace', BOOT_POOL_NAME, label, zfs_dev_part['name'])
        await self.middleware.call('boot.install_loader', dev)
        await self.update_initramfs()

    @accepts()
    @returns()
    @job(lock='boot_scrub')
    async def scrub(self, job):
        """
        Scrub on boot pool.
        """
        subjob = await self.middleware.call('pool.scrub.scrub', BOOT_POOL_NAME)
        return await job.wrap(subjob)

    @accepts(
        Int('interval', validators=[Range(min=1)])
    )
    @returns(Int('interval'))
    async def set_scrub_interval(self, interval):
        """
        Set Automatic Scrub Interval value in days.
        """
        await self.middleware.call(
            'datastore.update',
            'system.advanced',
            (await self.middleware.call('system.advanced.config'))['id'],
            {'adv_boot_scrub': interval},
        )
        return interval

    @accepts()
    @returns(Int('interval'))
    async def get_scrub_interval(self):
        """
        Get Automatic Scrub Interval value in days.
        """
        return (await self.middleware.call('system.advanced.config'))['boot_scrub']

    @private
    async def update_initramfs(self):
        """
        Returns true if initramfs was updated and false otherwise.
        """
        cp = await run(
            '/usr/local/bin/truenas-initrd.py', '/', encoding='utf8', errors='ignore', check=False
        )
        if cp.returncode > 1:
            raise CallError(f'Failed to update initramfs: {cp.stderr}')

        return cp.returncode == 1

    @private
    async def expand(self):
        boot_pool = await self.middleware.call('boot.pool_name')
        for device in await self.middleware.call('zfs.pool.get_devices', boot_pool):
            try:
                await self.expand_device(device)
            except CallError as e:
                self.middleware.logger.error('Error trying to expand boot pool partition %r: %r', device, e)
            except Exception:
                self.middleware.logger.error('Error trying to expand boot pool partition %r', device, exc_info=True)

    @private
    async def expand_device(self, device):
        disk = await self.middleware.call('disk.get_disk_from_partition', device)

        partitions = await self.middleware.call('disk.list_partitions', disk)
        if len(partitions) != 3:
            raise CallError(f'Expected 3 partitions, found {len(partitions)}')

        if partitions[-1]['name'] != device:
            raise CallError(f'{device} is not the last partition')

        if partitions[-1]['partition_number'] != 3:
            raise CallError(f'{device} is not 3rd partition')

        if partitions[-1]['start_sector'] != partitions[-2]['end_sector'] + 1:
            raise CallError(f'{device} does not immediately follow the 2nd partition')

        disk_size = await self.middleware.call('disk.get_dev_size', disk)
        if partitions[-1]['end'] > disk_size / 1.1:
            return

        self.middleware.logger.info('Resizing boot pool partition %r from %r (disk_size = %r)',
                                    device, partitions[-1]['end'], disk_size)
        await run('sgdisk', '-d', '3', f'/dev/{disk}', encoding='utf-8', errors='ignore')
        await run('sgdisk', '-N', '3', f'/dev/{disk}', encoding='utf-8', errors='ignore')
        await run('partprobe', encoding='utf-8', errors='ignore')
        await run('zpool', 'online', '-e', 'boot-pool', device, encoding='utf-8', errors='ignore')
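boot.attach and boot.scrub are jobs, so a caller normally waits for them to complete. A minimal usage sketch, assuming the middlewared Python client and its job=True convenience for waiting on jobs (the device name is illustrative):

from middlewared.client import Client

with Client() as c:
    state = c.call('boot.get_state')
    print(state['name'], state['status'])

    # Mirror the boot pool onto a second disk and wait for the attach job
    # to finish ('sdb' is an illustrative device name).
    c.call('boot.attach', 'sdb', {'expand': False}, job=True)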
Example #5
class SMARTTestService(CRUDService):
    class Config:
        datastore = 'tasks.smarttest'
        datastore_extend = 'smart.test.smart_test_extend'
        datastore_prefix = 'smarttest_'
        namespace = 'smart.test'
        cli_namespace = 'task.smart_test'

    ENTRY = Patch(
        'smart_task_create',
        'smart_task_entry',
        ('add', Int('id')),
    )

    @private
    async def smart_test_extend(self, data):
        disks = data.pop('disks')
        data['disks'] = [disk['disk_identifier'] for disk in disks]
        test_type = {
            'L': 'LONG',
            'S': 'SHORT',
            'C': 'CONVEYANCE',
            'O': 'OFFLINE',
        }
        data['type'] = test_type[data.pop('type')]
        Cron.convert_db_format_to_schedule(data)
        return data

    @private
    async def validate_data(self, data, schema):
        verrors = ValidationErrors()

        smart_tests = await self.query(filters=[('type', '=', data['type'])])
        configured_disks = [d for test in smart_tests for d in test['disks']]
        disks_dict = await self.disk_choices()

        disks = data.get('disks')
        used_disks = []
        invalid_disks = []
        for disk in disks:
            if disk in configured_disks:
                used_disks.append(disks_dict[disk])
            if disk not in disks_dict.keys():
                invalid_disks.append(disk)

        if used_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks already have tests for this type: {", ".join(used_disks)}'
            )

        if invalid_disks:
            verrors.add(
                f'{schema}.disks',
                f'The following disks are invalid: {", ".join(invalid_disks)}')

        return verrors

    @accepts(Bool('full_disk', default=False))
    async def disk_choices(self, full_disk):
        """
        Returns disk choices for S.M.A.R.T. test.

        `full_disk` will return full disk objects instead of just names.
        """
        return {
            disk['identifier']: disk if full_disk else disk['name']
            for disk in await self.middleware.call('disk.query',
                                                   [['devname', '!^', 'nv']])
            if await self.middleware.call('disk.smartctl_args', disk['name'])
            is not None
        }

    @accepts(
        Dict('smart_task_create',
             Cron('schedule', exclude=['minute']),
             Str('desc'),
             Bool('all_disks', default=False),
             List('disks', items=[Str('disk')]),
             Str('type',
                 enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                 required=True),
             register=True))
    async def do_create(self, data):
        """
        Create a SMART Test Task.

        `disks` is a list of valid disks which should be monitored in this task.

        `type` is specified to represent the type of SMART test to be executed.

        `all_disks` when enabled sets the task to cover all disks in which case `disks` is not required.

        .. examples(websocket)::

          Create a SMART Test Task which executes after every 30 minutes.

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "smart.test.create",
                "params": [{
                    "schedule": {
                        "minute": "30",
                        "hour": "*",
                        "dom": "*",
                        "month": "*",
                        "dow": "*"
                    },
                    "all_disks": true,
                    "type": "OFFLINE",
                    "disks": []
                }]
            }
        """
        data['type'] = data.pop('type')[0]
        verrors = await self.validate_data(data, 'smart_test_create')

        if data['all_disks']:
            if data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This test is already enabled for all disks')
        else:
            if not data.get('disks'):
                verrors.add('smart_test_create.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(data)

        data['id'] = await self.middleware.call(
            'datastore.insert', self._config.datastore, data,
            {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.get_instance(data['id'])

    async def do_update(self, id, data):
        """
        Update SMART Test Task of `id`.
        """
        old = await self.query(filters=[('id', '=', id)],
                               options={'get': True})
        new = old.copy()
        new.update(data)

        new['type'] = new.pop('type')[0]
        old['type'] = old.pop('type')[0]
        new_disks = [disk for disk in new['disks'] if disk not in old['disks']]
        deleted_disks = [
            disk for disk in old['disks'] if disk not in new['disks']
        ]
        if old['type'] == new['type']:
            new['disks'] = new_disks
        verrors = await self.validate_data(new, 'smart_test_update')

        new['disks'] = [
            disk for disk in chain(new_disks, old['disks'])
            if disk not in deleted_disks
        ]

        if new['all_disks']:
            if new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This test is already enabled for all disks')
        else:
            if not new.get('disks'):
                verrors.add('smart_test_update.disks',
                            'This field is required')

        if verrors:
            raise verrors

        Cron.convert_schedule_to_db_format(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return await self.get_instance(id)

    async def do_delete(self, id):
        """
        Delete SMART Test Task of `id`.
        """
        response = await self.middleware.call('datastore.delete',
                                              self._config.datastore, id)

        asyncio.ensure_future(self._service_change('smartd', 'restart'))

        return response

    @accepts(
        List('disks',
             items=[
                 Dict(
                     'disk_run',
                     Str('identifier', required=True),
                     Str('mode',
                         enum=['FOREGROUND', 'BACKGROUND'],
                         default='BACKGROUND'),
                     Str('type',
                         enum=['LONG', 'SHORT', 'CONVEYANCE', 'OFFLINE'],
                         required=True),
                 )
             ]))
    @returns(
        List('smart_manual_test',
             items=[
                 Dict(
                     'smart_manual_test_disk_response',
                     Str('disk', required=True),
                     Str('identifier', required=True),
                     Str('error', required=True, null=True),
                     Datetime('expected_result_time'),
                     Int('job'),
                 )
             ]))
    async def manual_test(self, disks):
        """
        Run manual SMART tests for `disks`.

        `type` indicates what type of SMART test will be run and must be specified.
        """
        verrors = ValidationErrors()
        test_disks_list = []
        if not disks:
            verrors.add('disks', 'Please specify at least one disk.')
        else:
            supported_disks = await self.disk_choices(True)
            devices = await self.middleware.call(
                'device.get_storage_devices_topology')
            valid_disks = [
                disk['identifier'] for disk in await self.middleware.call(
                    'disk.query',
                    [('identifier', 'in', [disk['identifier'] for disk in disks])],
                    {'force_sql_filters': True},
                )
            ]
            for index, disk in enumerate(disks):
                if current_disk := supported_disks.get(disk['identifier']):
                    test_disks_list.append({
                        'disk': current_disk['name'],
                        **disk
                    })
                else:
                    if disk['identifier'] in valid_disks:
                        verrors.add(
                            f'disks.{index}.identifier',
                            f'{disk["identifier"]} does not support S.M.A.R.T test.'
                        )
                    else:
                        verrors.add(
                            f'disks.{index}.identifier',
                            f'{disk["identifier"]} is not valid. Please provide a valid disk identifier.'
                        )
                    continue

                if current_disk['name'] is None:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]} disk. Failed to retrieve name.'
                    )

                device = devices.get(current_disk['name'])
                if not device:
                    verrors.add(
                        f'disks.{index}.identifier',
                        f'Test cannot be performed for {disk["identifier"]}. Unable to retrieve disk details.'
                    )

        verrors.check()

        return await asyncio_map(self.__manual_test, test_disks_list, 16)
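A manual run takes a list of disk identifiers plus the test type, and the response carries any per-disk error along with the spawned job. A minimal usage sketch, assuming the middlewared Python client (identifiers come from smart.test.disk_choices):

from middlewared.client import Client

with Client() as c:
    # Pick the first supported disk, if any, and start a SHORT test on it.
    choices = c.call('smart.test.disk_choices')
    results = c.call('smart.test.manual_test', [
        {'identifier': identifier, 'type': 'SHORT'}
        for identifier in list(choices)[:1]
    ])
    for result in results:
        print(result['disk'], result['error'])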
Example #6
File: disk.py (project: bmhughes/freenas)
class DiskService(CRUDService):
    class Config:
        datastore = 'storage.disk'
        datastore_prefix = 'disk_'
        datastore_extend = 'disk.disk_extend'
        datastore_extend_context = 'disk.disk_extend_context'
        datastore_primary_key = 'identifier'
        datastore_primary_key_type = 'string'
        event_register = False
        event_send = False
        cli_namespace = 'storage.disk'

    ENTRY = Dict(
        'disk_entry',
        Str('identifier', required=True),
        Str('name', required=True),
        Str('subsystem', required=True),
        Int('number', required=True),
        Str('serial', required=True),
        Int('size', required=True),
        Str('multipath_name', required=True),
        Str('multipath_member', required=True),
        Str('description', required=True),
        Str('transfermode', required=True),
        Str('hddstandby',
            required=True,
            enum=[
                'ALWAYS ON', '5', '10', '20', '30', '60', '120', '180', '240',
                '300', '330'
            ]),
        Bool('hddstandby_force', required=True),
        Bool('togglesmart', required=True),
        Str('advpowermgmt',
            required=True,
            enum=['DISABLED', '1', '64', '127', '128', '192', '254']),
        Str('smartoptions', required=True),
        Datetime('expiretime', required=True, null=True),
        Int('critical', required=True, null=True),
        Int('difference', required=True, null=True),
        Int('informational', required=True, null=True),
        Str('model', required=True, null=True),
        Int('rotationrate', required=True, null=True),
        Str('type', required=True, null=True),
        Str('zfs_guid', required=True, null=True),
        Str('devname', required=True),
        Dict('enclosure', Int('number'), Int('slot'), null=True,
             required=True),
        Str('pool', null=True, required=True),
        Str('passwd', private=True),
        Str('kmip_uid', null=True),
    )

    @filterable
    async def query(self, filters, options):
        """
        Query disks.

        The following extra options are supported:

             include_expired: true - will also include expired disks (default: false)
             passwords: true - will not hide KMIP password for the disks (default: false)
             pools: true - will join pool name for each disk (default: false)
        """
        filters = filters or []
        options = options or {}
        if not options.get('extra', {}).get('include_expired', False):
            filters += [('expiretime', '=', None)]

        return await super().query(filters, options)

    @private
    async def disk_extend(self, disk, context):
        disk.pop('enabled', None)
        for key in ['advpowermgmt', 'hddstandby']:
            disk[key] = disk[key].upper()
        try:
            disk['size'] = int(disk['size'])
        except ValueError:
            disk['size'] = None
        if disk['multipath_name']:
            disk['devname'] = f'multipath/{disk["multipath_name"]}'
        else:
            disk['devname'] = disk['name']
        self._expand_enclosure(disk)
        if context['passwords']:
            if not disk['passwd']:
                disk['passwd'] = context['disks_keys'].get(
                    disk['identifier'], '')
        else:
            disk.pop('passwd')
            disk.pop('kmip_uid')
        if disk['name'] in context['boot_pool_disks']:
            disk['pool'] = context['boot_pool_name']
        else:
            disk['pool'] = context['zfs_guid_to_pool'].get(disk['zfs_guid'])
        return disk

    @private
    async def disk_extend_context(self, rows, extra):
        context = {
            'passwords': extra.get('passwords', False),
            'disks_keys': {},
            'pools': extra.get('pools', False),
            'boot_pool_disks': [],
            'boot_pool_name': None,
            'zfs_guid_to_pool': {},
        }

        if context['passwords']:
            context['disks_keys'] = await self.middleware.call(
                'kmip.retrieve_sed_disks_keys')

        if context['pools']:
            context['boot_pool_disks'] = await self.middleware.call(
                'boot.get_disks')
            context['boot_pool_name'] = await self.middleware.call(
                'boot.pool_name')

            for pool in await self.middleware.call('zfs.pool.query'):
                topology = await self.middleware.call(
                    'pool.transform_topology_lightweight', pool['groups'])
                for vdev in await self.middleware.call('pool.flatten_topology',
                                                       topology):
                    if vdev['type'] == 'DISK':
                        context['zfs_guid_to_pool'][
                            vdev['guid']] = pool['name']

        return context

    def _expand_enclosure(self, disk):
        if disk['enclosure_slot'] is not None:
            disk['enclosure'] = {
                'number': disk['enclosure_slot'] // 1000,
                'slot': disk['enclosure_slot'] % 1000
            }
        else:
            disk['enclosure'] = None
        del disk['enclosure_slot']

    def _compress_enclosure(self, disk):
        if disk['enclosure'] is not None:
            disk['enclosure_slot'] = disk['enclosure']['number'] * 1000 + disk[
                'enclosure']['slot']
        else:
            disk['enclosure_slot'] = None
        del disk['enclosure']

    @accepts(Str('id'),
             Patch(
                 'disk_entry',
                 'disk_update',
                 ('rm', {'name': 'identifier'}),
                 ('rm', {'name': 'name'}),
                 ('rm', {'name': 'subsystem'}),
                 ('rm', {'name': 'serial'}),
                 ('rm', {'name': 'kmip_uid'}),
                 ('rm', {'name': 'size'}),
                 ('rm', {'name': 'multipath_name'}),
                 ('rm', {'name': 'multipath_member'}),
                 ('rm', {'name': 'transfermode'}),
                 ('rm', {'name': 'expiretime'}),
                 ('rm', {'name': 'model'}),
                 ('rm', {'name': 'rotationrate'}),
                 ('rm', {'name': 'type'}),
                 ('rm', {'name': 'zfs_guid'}),
                 ('rm', {'name': 'devname'}),
                 ('attr', {'update': True}),
             ))
    async def do_update(self, id, data):
        """
        Update disk of `id`.

        If SMART needs extra options that we don't already support, they can be passed
        via `smartoptions`.

        `critical`, `informational` and `difference` are integer values on which alerts for SMART are configured
        if the disk temperature crosses the assigned threshold for each respective attribute.
        If they are set to null, then SMARTD config values are used as defaults.

        Email of log level LOG_CRIT is issued when disk temperature crosses `critical`.

        Email of log level LOG_INFO is issued when disk temperature crosses `informational`.

        If temperature of a disk changes by `difference` degree Celsius since the last report, SMART reports this.
        """

        old = await self.middleware.call(
            'datastore.query', 'storage.disk', [['identifier', '=', id]], {
                'get': True,
                'prefix': self._config.datastore_prefix
            })
        old.pop('enabled', None)
        self._expand_enclosure(old)
        new = old.copy()
        new.update(data)

        verrors = ValidationErrors()

        if new['hddstandby_force']:
            if new['hddstandby'] == 'ALWAYS ON':
                verrors.add(
                    'disk_update.hddstandby_force',
                    'This option does not make sense when HDD Standby is not set'
                )

        if verrors:
            raise verrors

        if not new['passwd'] and old['passwd'] != new['passwd']:
            # We want to make sure kmip uid is None in this case
            if new['kmip_uid']:
                asyncio.ensure_future(
                    self.middleware.call('kmip.reset_sed_disk_password', id,
                                         new['kmip_uid']))
            new['kmip_uid'] = None

        for key in ['advpowermgmt', 'hddstandby']:
            new[key] = new[key].title()

        self._compress_enclosure(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        if any(new[key] != old[key] for key in ['hddstandby', 'advpowermgmt']):
            await self.middleware.call('disk.power_management', new['name'])

        if any(new[key] != old[key] for key in [
                'togglesmart',
                'smartoptions',
                'hddstandby',
                'hddstandby_force',
                'critical',
                'difference',
                'informational',
        ]):
            if new['togglesmart']:
                await self.middleware.call('disk.toggle_smart_on', new['name'])
            else:
                await self.middleware.call('disk.toggle_smart_off',
                                           new['name'])

            await self.middleware.call('disk.update_hddstandby_force')
            await self.middleware.call('disk.update_smartctl_args_for_disks')
            await self.middleware.call('service.restart', 'collectd')
            await self._service_change('smartd', 'restart')
            await self._service_change('snmp', 'restart')

        if new['passwd'] and old['passwd'] != new['passwd']:
            await self.middleware.call('kmip.sync_sed_keys', [id])

        return await self.query([['identifier', '=', id]], {'get': True})

    @private
    async def copy_settings(self, old, new):
        await self.middleware.call(
            'disk.update', new['identifier'], {
                k: v
                for k, v in old.items() if k in [
                    'togglesmart',
                    'advpowermgmt',
                    'description',
                    'hddstandby',
                    'hddstandby_force',
                    'smartoptions',
                    'critical',
                    'difference',
                    'informational',
                ]
            })

        changed = False
        for row in await self.middleware.call(
                'datastore.query', 'tasks.smarttest_smarttest_disks', [
                    ['disk_id', '=', old['identifier']],
                ], {'relationships': False}):
            try:
                await self.middleware.call(
                    'datastore.insert', 'tasks.smarttest_smarttest_disks', {
                        'smarttest_id': row['smarttest_id'],
                        'disk_id': new['identifier'],
                    })
            except IntegrityError:
                pass
            else:
                changed = True

        if changed:
            asyncio.ensure_future(self._service_change('smartd', 'restart'))

    @private
    def get_name(self, disk):
        if disk["multipath_name"]:
            return f"multipath/{disk['multipath_name']}"
        else:
            return disk["name"]

    @accepts(Bool("join_partitions", default=False))
    @returns(List('unused_disks', items=[Ref('disk_entry')]))
    async def get_unused(self, join_partitions):
        """
        Helper method to get all disks that are not in use, either by the boot
        pool or the user pools.
        """
        disks = await self.query([('devname', 'nin', await self.get_reserved())])

        if join_partitions:
            for disk in disks:
                disk['partitions'] = await self.middleware.call(
                    'disk.list_partitions', disk['devname'])

        return disks

    @private
    async def get_reserved(self):
        reserved = list(await self.middleware.call('boot.get_disks'))
        reserved += await self.middleware.call('pool.get_disks')
        if osc.IS_FREEBSD:
            # FIXME: Make this freebsd specific for now
            reserved += [i async for i in self.__get_iscsi_targets()]
        return reserved

    async def __get_iscsi_targets(self):
        iscsi_target_extent_paths = [
            extent["iscsi_target_extent_path"]
            for extent in await self.middleware.call(
                'datastore.query', 'services.iscsitargetextent',
                [('iscsi_target_extent_type', '=', 'Disk')])
        ]
        for disk in await self.middleware.call(
                'datastore.query', 'storage.disk',
                [('disk_identifier', 'in', iscsi_target_extent_paths)]):
            yield disk["disk_name"]

    @private
    async def check_clean(self, disk):
        return not bool(await self.middleware.call('disk.list_partitions',
                                                   disk))

    @private
    async def sed_unlock_all(self):
        # on an HA system, if both controllers manage to send
        # SED commands at the same time, then it can cause issues
        # where, ultimately, the disks don't get unlocked
        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'BACKUP':
                return

        advconfig = await self.middleware.call('system.advanced.config')
        disks = await self.middleware.call(
            'disk.query', [], {'extra': {'passwords': True}})

        # If no SED password was found we can stop here
        if not await self.middleware.call('system.advanced.sed_global_password'
                                          ) and not any(
                                              [d['passwd'] for d in disks]):
            return

        result = await asyncio_map(
            lambda disk: self.sed_unlock(disk['name'], disk, advconfig), disks,
            16)
        locked = list(filter(lambda x: x['locked'] is True, result))
        if locked:
            disk_names = ', '.join([i['name'] for i in locked])
            self.logger.warning(
                'Failed to unlock the following SED disks: %s', disk_names)
            raise CallError('Failed to unlock SED disks', errno.EACCES)
        return True

    @private
    async def sed_unlock(self, disk_name, disk=None, _advconfig=None):
        # on an HA system, if both controllers manage to send
        # SED commands at the same time, then it can cause issues
        # where, ultimately, the disks don't get unlocked
        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'BACKUP':
                return

        if _advconfig is None:
            _advconfig = await self.middleware.call('system.advanced.config')

        devname = await self.middleware.call('disk.sed_dev_name', disk_name)
        # We need two states to tell apart when disk was successfully unlocked
        locked = None
        unlocked = None
        password = await self.middleware.call(
            'system.advanced.sed_global_password')

        if disk is None:
            disk = await self.query([('name', '=', disk_name)],
                                    {'extra': {
                                        'passwords': True
                                    }})
            if disk and disk[0]['passwd']:
                password = disk[0]['passwd']
        elif disk.get('passwd'):
            password = disk['passwd']

        rv = {'name': disk_name, 'locked': None}

        if not password:
            # If there is no password no point in continuing
            return rv

        # Try unlocking TCG OPAL using sedutil
        cp = await run('sedutil-cli', '--query', devname, check=False)
        if cp.returncode == 0:
            output = cp.stdout.decode(errors='ignore')
            if 'Locked = Y' in output:
                locked = True
                cp = await run('sedutil-cli',
                               '--setLockingRange',
                               '0',
                               'RW',
                               password,
                               devname,
                               check=False)
                if cp.returncode == 0:
                    locked = False
                    unlocked = True
                    # If we were able to unlock it, let's set mbrenable to off
                    if osc.IS_LINUX:
                        cp = await run('sedutil-cli',
                                       '--setMBREnable',
                                       'off',
                                       password,
                                       devname,
                                       check=False)
                        if cp.returncode:
                            self.logger.error(
                                'Failed to set MBREnable for %r to "off": %s',
                                devname,
                                cp.stderr.decode(),
                                exc_info=True)

            elif 'Locked = N' in output:
                locked = False

        # Try ATA Security if SED was not unlocked and it's not locked by OPAL
        if not unlocked and not locked:
            locked, unlocked = await self.middleware.call(
                'disk.unlock_ata_security', devname, _advconfig, password)

        if osc.IS_FREEBSD and unlocked:
            try:
                # Disk needs to be retasted after unlock
                with open(f'/dev/{disk_name}', 'wb'):
                    pass
            except OSError:
                pass
        elif locked:
            self.logger.error(f'Failed to unlock {disk_name}')
        rv['locked'] = locked
        return rv

    @private
    async def sed_initial_setup(self, disk_name, password):
        """
        NO_SED - Does not support SED
        ACCESS_GRANTED - Already setup and `password` is a valid password
        LOCKING_DISABLED - Locking range is disabled
        SETUP_FAILED - Initial setup call failed
        SUCCESS - Setup successfully completed
        """
        # on an HA system, if both controllers manage to send
        # SED commands at the same time, then it can cause issues
        # where, ultimately, the disks don't get unlocked
        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'BACKUP':
                return

        devname = await self.middleware.call('disk.sed_dev_name', disk_name)

        cp = await run('sedutil-cli', '--isValidSED', devname, check=False)
        if b' SED ' not in cp.stdout:
            return 'NO_SED'

        cp = await run('sedutil-cli',
                       '--listLockingRange',
                       '0',
                       password,
                       devname,
                       check=False)
        if cp.returncode == 0:
            output = cp.stdout.decode()
            if RE_SED_RDLOCK_EN.search(output) and RE_SED_WRLOCK_EN.search(
                    output):
                return 'ACCESS_GRANTED'
            else:
                return 'LOCKING_DISABLED'

        try:
            await run('sedutil-cli', '--initialSetup', password, devname)
        except subprocess.CalledProcessError as e:
            self.logger.debug(
                f'initialSetup failed for {disk_name}:\n{e.stdout}{e.stderr}')
            return 'SETUP_FAILED'

        # OPAL 2.0 disks do not enable locking range on setup like Enterprise does
        try:
            await run('sedutil-cli', '--enableLockingRange', '0', password,
                      devname)
        except subprocess.CalledProcessError as e:
            self.logger.debug(
                f'enableLockingRange failed for {disk_name}:\n{e.stdout}{e.stderr}'
            )
            return 'SETUP_FAILED'

        return 'SUCCESS'

    def sed_dev_name(self, disk_name):
        if disk_name.startswith("nvd"):
            nvme = get_nsid(f"/dev/{disk_name}")
            return f"/dev/{nvme}"

        return f"/dev/{disk_name}"

    @private
    async def multipath_create(self, name, consumers, mode=None):
        """
        Create an Active/Passive GEOM_MULTIPATH provider
        with name ``name`` using ``consumers`` as the consumers for it

        Modes:
            A - Active/Active
            R - Active/Read
            None - Active/Passive

        Returns:
            True in case the label succeeded and False otherwise
        """
        cmd = ["/sbin/gmultipath", "label", name] + consumers
        if mode:
            cmd.insert(2, f'-{mode}')
        try:
            await run(cmd,
                      stderr=subprocess.STDOUT,
                      encoding="utf-8",
                      errors="ignore")
        except subprocess.CalledProcessError as e:
            raise CallError(f"Error creating multipath: {e.stdout}")

    async def __multipath_next(self):
        """
        Find out the next available name for a multipath named diskX
        where X is a crescenting value starting from 1

        Returns:
            The string of the multipath name to be created
        """
        await self.middleware.run_in_thread(geom.scan)
        numbers = sorted([
            int(RE_MPATH_NAME.search(g.name).group(1))
            for g in geom.class_by_name('MULTIPATH').geoms
            if RE_MPATH_NAME.match(g.name)
        ])
        if not numbers:
            numbers = [0]
        for number in range(1, numbers[-1] + 2):
            if number not in numbers:
                break
        else:
            raise ValueError('Could not find multipaths')
        return f'disk{number}'

    @private
    @accepts()
    async def multipath_sync(self):
        """
        Synchronize multipath disks

        Every distinct GEOM_DISK that shares an ident (i.e. disk serial number)
        in conjunction with the lunid is considered a multipath and will be
        handled by GEOM_MULTIPATH.

        If the disk is not currently in use by a Volume or an iSCSI Disk Extent,
        a gmultipath is automatically created and made available for use.
        """

        await self.middleware.run_in_thread(geom.scan)

        mp_disks = []
        for g in geom.class_by_name('MULTIPATH').geoms:
            for c in g.consumers:
                p_geom = c.provider.geom
                # For now just DISK is allowed
                if p_geom.clazz.name != 'DISK':
                    self.logger.warning(
                        "A consumer that is not a disk (%s) is part of a "
                        "MULTIPATH, currently unsupported by middleware",
                        p_geom.clazz.name)
                    continue
                mp_disks.append(p_geom.name)

        reserved = await self.get_reserved()

        devlist = await camcontrol_list()
        is_enterprise = await self.middleware.call('system.is_enterprise')

        serials = defaultdict(list)
        active_active = []
        for g in geom.class_by_name('DISK').geoms:
            if not RE_DA.match(
                    g.name) or g.name in reserved or g.name in mp_disks:
                continue
            if is_enterprise:
                descr = g.provider.config.get('descr') or ''
                if (descr == 'STEC ZeusRAM' or descr.startswith('VIOLIN')
                        or descr.startswith('3PAR')):
                    active_active.append(g.name)
            if devlist.get(g.name, {}).get('driver') == 'umass-sim':
                continue
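            # Build a multipath key from the disk serial (ident) combined with the
            # lunid; disks that share this key and the same mediasize are treated
            # as members of the same multipath.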
            serial = ''
            v = g.provider.config.get('ident')
            if v:
                # Exclude fake serial numbers e.g. `000000000000` reported by FreeBSD 12.2 USB stack
                if not v.replace('0', ''):
                    continue
                serial = v
            v = g.provider.config.get('lunid')
            if v:
                serial += v
            if not serial:
                continue
            size = g.provider.mediasize
            serials[(serial, size)].append(g.name)
            serials[(serial, size)].sort(key=lambda x: int(x[2:]))

        disks_pairs = [disks for disks in list(serials.values())]
        disks_pairs.sort(key=lambda x: int(x[0][2:]))

        # Mode is Active/Passive for TrueNAS HA
        mode = None if not is_enterprise else 'R'
        for disks in disks_pairs:
            if not len(disks) > 1:
                continue
            name = await self.__multipath_next()
            try:
                await self.multipath_create(
                    name, disks, 'A' if disks[0] in active_active else mode)
            except CallError as e:
                self.logger.error("Error creating multipath: %s", e.errmsg)

        # Scan again to take new multipaths into account
        await self.middleware.run_in_thread(geom.scan)
        mp_ids = []
        for g in geom.class_by_name('MULTIPATH').geoms:
            _disks = []
            for c in g.consumers:
                p_geom = c.provider.geom
                # For now just DISK is allowed
                if p_geom.clazz.name != 'DISK':
                    continue
                _disks.append(p_geom.name)

            qs = await self.middleware.call(
                'datastore.query', 'storage.disk', [
                    [
                        'OR',
                        [
                            ['disk_name', 'in', _disks],
                            ['disk_multipath_member', 'in', _disks],
                        ]
                    ],
                    ['disk_expiretime', '=', None],
                ])
            if qs:
                diskobj = qs[0]
                mp_ids.append(diskobj['disk_identifier'])
                update = False  # Make sure to not update if nothing changed
                if diskobj['disk_multipath_name'] != g.name:
                    update = True
                    diskobj['disk_multipath_name'] = g.name
                if diskobj['disk_name'] in _disks:
                    _disks.remove(diskobj['disk_name'])
                if _disks and diskobj['disk_multipath_member'] != _disks[-1]:
                    update = True
                    diskobj['disk_multipath_member'] = _disks.pop()
                if update:
                    await self.middleware.call('datastore.update',
                                               'storage.disk',
                                               diskobj['disk_identifier'],
                                               diskobj)

        # Update all disks which were not identified as MULTIPATH, resetting attributes
        disks = await self.middleware.call(
            'datastore.query', 'storage.disk',
            [('disk_identifier', 'nin', mp_ids)])
        for disk in disks:
            if disk['disk_multipath_name'] or disk['disk_multipath_member']:
                disk['disk_multipath_name'] = ''
                disk['disk_multipath_member'] = ''
                await self.middleware.call('datastore.update', 'storage.disk',
                                           disk['disk_identifier'], disk)

    @private
    async def check_disks_availability(self, verrors, disks, schema):
        """
        Makes sure the disks are present in the system and not reserved
        by anything else (boot, pool, iscsi, etc).

        Returns:
            dict - mapping of devname to the disk.query entry for each requested disk
        """
        disks_cache = dict(
            map(
                lambda x: (x['devname'], x), await
                self.middleware.call('disk.query',
                                     [('devname', 'in', disks)])))

        disks_set = set(disks)
        disks_not_in_cache = disks_set - set(disks_cache.keys())
        if disks_not_in_cache:
            verrors.add(
                f'{schema}.topology',
                f'The following disks were not found in the system: {", ".join(disks_not_in_cache)}.'
            )

        disks_reserved = await self.middleware.call('disk.get_reserved')
        disks_reserved = disks_set - (disks_set - set(disks_reserved))
        if disks_reserved:
            verrors.add(
                f'{schema}.topology',
                f'The following disks are already in use: {", ".join(disks_reserved)}.'
            )
        return disks_cache

    @private
    async def configure_power_management(self):
        """
        This runs on boot to properly configure all power management options
        (Advanced Power Management and IDLE) for all disks.
        """
        # Do not run power management on ENTERPRISE
        if await self.middleware.call('system.product_type') == 'ENTERPRISE':
            return
        for disk in await self.middleware.call('disk.query'):
            await self.middleware.call('disk.power_management', disk['name'],
                                       disk)

    @private
    async def power_management(self, dev, disk=None):
        """
        Actually sets power management for `dev`.
        `disk` is the disk.query entry; it is optional, so this can be called with only the disk name.
        """

        if not disk:
            disk = await self.middleware.call('disk.query',
                                              [('name', '=', dev)])
            if not disk:
                return
            disk = disk[0]

        return await self.middleware.call('disk.power_management_impl', dev,
                                          disk)
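
A minimal usage sketch (not part of the scraped example; the helper name and password handling are assumptions): another middleware plugin could drive the SED methods above roughly like this.

# Hypothetical helper, for illustration only.
async def sed_setup_and_unlock(middleware, disk_name, password):
    # Provision the drive for SED locking; returns one of the status strings
    # documented in sed_initial_setup() above.
    status = await middleware.call('disk.sed_initial_setup', disk_name, password)
    if status not in ('SUCCESS', 'ACCESS_GRANTED'):
        return status
    # Unlock using the per-disk or global SED password resolved internally;
    # rv['locked'] is False once the drive has been unlocked.
    rv = await middleware.call('disk.sed_unlock', disk_name)
    return 'SUCCESS' if rv['locked'] is False else 'LOCKED'
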
Example #7
class AuthService(Service):
    class Config:
        cli_namespace = "auth"

    session_manager = SessionManager()

    token_manager = TokenManager()

    def __init__(self, *args, **kwargs):
        super(AuthService, self).__init__(*args, **kwargs)
        self.session_manager.middleware = self.middleware

    @filterable
    @filterable_returns(
        Dict(
            'session',
            Str('id'),
            Bool('internal'),
            Str('origin'),
            Str('credentials'),
            Datetime('created_at'),
        ))
    def sessions(self, filters, options):
        """
        Returns list of active auth sessions.

        Example of return value:

        [
            {
                "id": "NyhB1J5vjPjIV82yZ6caU12HLA1boDJcZNWuVQM4hQWuiyUWMGZTz2ElDp7Yk87d",
                "origin": "192.168.0.3:40392",
                "credentials": "TOKEN",
                "internal": False,
                "created_at": {"$date": 1545842426070}
            }
        ]

        `credentials` can be `UNIX_SOCKET`, `ROOT_TCP_SOCKET`, `TRUENAS_NODE`, `LOGIN_PASSWORD` or `TOKEN`,
        depending on what authentication method was used.

        If you want to exclude all internal connections from the list, call this method with the following arguments:

        [
            [
                ["internal", "=", False]
            ]
        ]
        """
        return filter_list(
            [
                dict(id=session_id,
                     internal=is_internal_session(session),
                     **session.dump()) for session_id, session in sorted(
                         self.session_manager.sessions.items(),
                         key=lambda t: t[1].created_at)
            ],
            filters,
            options,
        )

    @accepts(Str('username'), Str('password'))
    @returns(
        Bool(
            description=
            'Is `true` if `username` was successfully validated with provided `password`'
        ))
    async def check_user(self, username, password):
        """
        Verify username and password
        """
        if username != 'root':
            return False
        try:
            user = await self.middleware.call(
                'datastore.query', 'account.bsdusers',
                [('bsdusr_username', '=', username)], {'get': True})
        except IndexError:
            return False
        if user['bsdusr_unixhash'] in ('x', '*'):
            return False
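        # crypt() with the stored hash as the salt reproduces the full hash
        # only when the supplied password matches.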
        return crypt.crypt(password,
                           user['bsdusr_unixhash']) == user['bsdusr_unixhash']

    @accepts(Int('ttl', default=600, null=True),
             Dict('attrs', additional_attrs=True))
    @returns(Str('token'))
    def generate_token(self, ttl, attrs):
        """
        Generate a token to be used for authentication.

        `ttl` stands for Time To Live, in seconds. The token will be invalidated if the connection
        has been inactive for a time greater than this.

        `attrs` is a general purpose object/dictionary to hold information about the token.
        """
        if ttl is None:
            ttl = 600

        token = self.token_manager.create(ttl, attrs)

        return token.token

    @private
    def get_token(self, token_id):
        try:
            return {
                'attributes': self.token_manager.tokens[token_id].attributes,
            }
        except KeyError:
            return None

    @no_auth_required
    @accepts()
    @returns(
        Bool('two_factor_auth_enabled',
             description='Is `true` if 2FA is enabled'))
    async def two_factor_auth(self):
        """
        Returns true if two-factor authentication is required to authorize a user's login.
        """
        return (await self.middleware.call('auth.twofactor.config'))['enabled']

    @cli_private
    @no_auth_required
    @accepts(Str('username'), Str('password', private=True),
             Str('otp_token', null=True, default=None))
    @returns(Bool('successful_login'))
    @pass_app()
    async def login(self, app, username, password, otp_token):
        """
        Authenticate session using username and password.
        Currently only the root user is allowed.
        `otp_token` must be specified if two factor authentication is enabled.
        """
        valid = await self.check_user(username, password)
        twofactor_auth = await self.middleware.call('auth.twofactor.config')

        if twofactor_auth['enabled']:
            # Run auth.twofactor.verify regardless of the check_user result to
            # prevent guessing passwords via a timing attack
            valid &= await self.middleware.call('auth.twofactor.verify',
                                                otp_token)

        if valid:
            self.session_manager.login(
                app, LoginPasswordSessionManagerCredentials())
        return valid

    @cli_private
    @no_auth_required
    @accepts(Str('api_key'))
    @returns(Bool('successful_login'))
    @pass_app()
    async def login_with_api_key(self, app, api_key):
        """
        Authenticate session using API Key.
        """
        if await self.middleware.call('api_key.authenticate', api_key):
            self.session_manager.login(app, ApiKeySessionManagerCredentials())
            return True

        return False

    @cli_private
    @accepts()
    @returns(Bool('successful_logout'))
    @pass_app()
    async def logout(self, app):
        """
        Deauthenticates the app and, if a token exists, removes it from the
        session.
        """
        self.session_manager.logout(app)
        return True

    @cli_private
    @no_auth_required
    @accepts(Str('token'))
    @returns(Bool('successful_login'))
    @pass_app()
    def token(self, app, token):
        """Authenticate using a given `token` id."""
        token = self.token_manager.get(token)
        if token is None:
            return False

        self.session_manager.login(
            app, TokenSessionManagerCredentials(self.token_manager, token))
        return True
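
A small usage sketch (assumed, not part of the original listing; it presumes the standard middlewared websocket client and uses illustrative credentials): one session generates a token and a second session authenticates with it.

# Hypothetical client-side flow, for illustration only.
from middlewared.client import Client

with Client() as c:
    # First session authenticates with username/password (root only here).
    assert c.call('auth.login', 'root', 'example-password')
    # Issue a short-lived token that another session can use.
    token = c.call('auth.generate_token', 600, {'purpose': 'example'})

with Client() as c2:
    # The second session authenticates with the token instead of a password.
    assert c2.call('auth.token', token)
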
Example #8
class AlertService(Service):
    class Config:
        cli_namespace = "system.alert"

    def __init__(self, middleware):
        super().__init__(middleware)

        self.blocked_sources = defaultdict(set)
        self.sources_locks = {}

        self.blocked_failover_alerts_until = 0

    @private
    async def load(self):
        main_sources_dir = os.path.join(get_middlewared_dir(), "alert",
                                        "source")
        sources_dirs = [
            os.path.join(overlay_dir, "alert", "source")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        sources_dirs.insert(0, main_sources_dir)
        for sources_dir in sources_dirs:
            for module in load_modules(sources_dir):
                for cls in load_classes(
                        module, AlertSource,
                    (FilePresenceAlertSource, ThreadedAlertSource)):
                    source = cls(self.middleware)
                    if source.name in ALERT_SOURCES:
                        raise RuntimeError(
                            f"Alert source {source.name} is already registered"
                        )
                    ALERT_SOURCES[source.name] = source

        main_services_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.path.pardir,
            "alert", "service")
        services_dirs = [
            os.path.join(overlay_dir, "alert", "service")
            for overlay_dir in self.middleware.overlay_dirs
        ]
        services_dirs.insert(0, main_services_dir)
        for services_dir in services_dirs:
            for module in load_modules(services_dir):
                for cls in load_classes(
                        module, _AlertService,
                    (ThreadedAlertService, ProThreadedAlertService)):
                    ALERT_SERVICES_FACTORIES[cls.name()] = cls

    @private
    async def initialize(self, load=True):
        is_enterprise = await self.middleware.call("system.is_enterprise")

        self.node = "A"
        if is_enterprise:
            if await self.middleware.call("failover.node") == "B":
                self.node = "B"

        self.alerts = []
        if load:
            for alert in await self.middleware.call("datastore.query",
                                                    "system.alert"):
                del alert["id"]

                try:
                    alert["klass"] = AlertClass.class_by_name[alert["klass"]]
                except KeyError:
                    self.logger.info("Alert class %r is no longer present",
                                     alert["klass"])
                    continue

                alert["_uuid"] = alert.pop("uuid")
                alert["_source"] = alert.pop("source")
                alert["_key"] = alert.pop("key")
                alert["_text"] = alert.pop("text")

                alert = Alert(**alert)

                if not any(a.uuid == alert.uuid for a in self.alerts):
                    self.alerts.append(alert)

        self.alert_source_last_run = defaultdict(lambda: datetime.min)

        self.policies = {
            "IMMEDIATELY": AlertPolicy(),
            "HOURLY": AlertPolicy(lambda d: (d.date(), d.hour)),
            "DAILY": AlertPolicy(lambda d: (d.date())),
            "NEVER": AlertPolicy(lambda d: None),
        }
        for policy in self.policies.values():
            policy.receive_alerts(datetime.utcnow(), self.alerts)

    @private
    async def terminate(self):
        await self.flush_alerts()

    @accepts()
    @returns(List('alert_policies', items=[Str('policy', enum=POLICIES)]))
    async def list_policies(self):
        """
        List all alert policies which indicate the frequency of the alerts.
        """
        return POLICIES

    @accepts()
    @returns(
        List('categories',
             items=[
                 Dict(
                     'category', Str('id'), Str('title'),
                     List('classes',
                          items=[
                              Dict(
                                  'category_class',
                                  Str('id'),
                                  Str('title'),
                                  Str('level'),
                              )
                          ]))
             ]))
    async def list_categories(self):
        """
        List all types of alerts which the system can issue.
        """

        product_type = await self.middleware.call("alert.product_type")

        classes = [
            alert_class for alert_class in AlertClass.classes
            if product_type in alert_class.products
            and not alert_class.exclude_from_list
        ]

        return [{
            "id":
            alert_category.name,
            "title":
            alert_category_names[alert_category],
            "classes":
            sorted([{
                "id": alert_class.name,
                "title": alert_class.title,
                "level": alert_class.level.name,
            } for alert_class in classes
                    if alert_class.category == alert_category],
                   key=lambda klass: klass["title"])
        } for alert_category in AlertCategory if any(
            alert_class.category == alert_category for alert_class in classes)]

    @private
    async def list_sources(self):
        # TODO: this is a deprecated method for backward compatibility

        return [{
            "name": klass["id"],
            "title": klass["title"],
        } for klass in sum(
            [v["classes"] for v in await self.list_categories()], [])]

    @accepts()
    @returns(
        List('alerts',
             items=[
                 Dict(
                     'alert',
                     Str('uuid'),
                     Str('source'),
                     Str('klass'),
                     Any('args'),
                     Str('node'),
                     Str('key'),
                     Datetime('datetime'),
                     Datetime('last_occurrence'),
                     Bool('dismissed'),
                     Any('mail', null=True),
                     Str('text'),
                     Str('id'),
                     Str('level'),
                     Str('formatted', null=True),
                     Bool('one_shot'),
                 )
             ]))
    async def list(self):
        """
        List all types of alerts including active/dismissed currently in the system.
        """

        as_ = AlertSerializer(self.middleware)

        return [
            await as_.serialize(alert) for alert in sorted(
                self.alerts,
                key=lambda alert: (alert.klass.title, alert.datetime))
            if await as_.should_show_alert(alert)
        ]

    @private
    async def node_map(self):
        nodes = {
            'A': 'Controller A',
            'B': 'Controller B',
        }
        if await self.middleware.call('failover.licensed'):
            node = await self.middleware.call('failover.node')
            status = await self.middleware.call('failover.status')
            if status == 'MASTER':
                if node == 'A':
                    nodes = {
                        'A': 'Active Controller (A)',
                        'B': 'Standby Controller (B)',
                    }
                else:
                    nodes = {
                        'A': 'Standby Controller (A)',
                        'B': 'Active Controller (B)',
                    }
            else:
                nodes[node] = f'{status.title()} Controller ({node})'

        return nodes

    def __alert_by_uuid(self, uuid):
        try:
            return [a for a in self.alerts if a.uuid == uuid][0]
        except IndexError:
            return None

    @accepts(Str("uuid"))
    @returns()
    async def dismiss(self, uuid):
        """
        Dismiss the alert with the given `uuid`.
        """

        alert = self.__alert_by_uuid(uuid)
        if alert is None:
            return

        if issubclass(alert.klass, DismissableAlertClass):
            related_alerts, unrelated_alerts = bisect(
                lambda a: (a.node, a.klass) == (alert.node, alert.klass),
                self.alerts)
            left_alerts = await alert.klass(self.middleware
                                            ).dismiss(related_alerts, alert)
            for deleted_alert in related_alerts:
                if deleted_alert not in left_alerts:
                    self._delete_on_dismiss(deleted_alert)
        elif issubclass(
                alert.klass,
                OneShotAlertClass) and not alert.klass.deleted_automatically:
            self._delete_on_dismiss(alert)
        else:
            alert.dismissed = True
            await self._send_alert_changed_event(alert)

    def _delete_on_dismiss(self, alert):
        self.alerts.remove(alert)

        for policy in self.policies.values():
            policy.last_key_value_alerts.pop(alert.uuid, None)

        self._send_alert_deleted_event(alert)

    @accepts(Str("uuid"))
    @returns()
    async def restore(self, uuid):
        """
        Restore a previously dismissed alert with the given `uuid`.
        """

        alert = self.__alert_by_uuid(uuid)
        if alert is None:
            return

        alert.dismissed = False

        await self._send_alert_changed_event(alert)

    async def _send_alert_changed_event(self, alert):
        as_ = AlertSerializer(self.middleware)
        if await as_.should_show_alert(alert):
            self.middleware.send_event("alert.list",
                                       "CHANGED",
                                       id=alert.uuid,
                                       fields=await as_.serialize(alert))

    def _send_alert_deleted_event(self, alert):
        self.middleware.send_event("alert.list",
                                   "CHANGED",
                                   id=alert.uuid,
                                   cleared=True)

    @periodic(60)
    @private
    @job(lock="process_alerts", transient=True, lock_queue_size=1)
    async def process_alerts(self, job):
        if not await self.__should_run_or_send_alerts():
            return

        valid_alerts = copy.deepcopy(self.alerts)
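        # valid_alerts keeps a snapshot of the list above; it is restored below if
        # the system can no longer run/send alerts after the sources were polled.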
        await self.__run_alerts()

        self.__expire_alerts()

        if not await self.__should_run_or_send_alerts():
            self.alerts = valid_alerts
            return

        await self.middleware.call("alert.send_alerts")

    @private
    @job(lock="process_alerts", transient=True)
    async def send_alerts(self, job):
        global SEND_ALERTS_ON_READY

        if await self.middleware.call("system.state") != "READY":
            SEND_ALERTS_ON_READY = True
            return

        product_type = await self.middleware.call("alert.product_type")
        classes = (await
                   self.middleware.call("alertclasses.config"))["classes"]

        now = datetime.utcnow()
        for policy_name, policy in self.policies.items():
            gone_alerts, new_alerts = policy.receive_alerts(now, self.alerts)

            for alert_service_desc in await self.middleware.call(
                    "datastore.query", "system.alertservice",
                [["enabled", "=", True]]):
                service_level = AlertLevel[alert_service_desc["level"]]

                service_alerts = [
                    alert for alert in self.alerts
                    if (product_type in alert.klass.products and
                        get_alert_level(alert, classes).value >= service_level.
                        value and get_alert_policy(alert, classes) != "NEVER")
                ]
                service_gone_alerts = [
                    alert for alert in gone_alerts if
                    (product_type in alert.klass.products and get_alert_level(
                        alert, classes).value >= service_level.value
                     and get_alert_policy(alert, classes) == policy_name)
                ]
                service_new_alerts = [
                    alert for alert in new_alerts if
                    (product_type in alert.klass.products and get_alert_level(
                        alert, classes).value >= service_level.value
                     and get_alert_policy(alert, classes) == policy_name)
                ]
                for gone_alert in list(service_gone_alerts):
                    for new_alert in service_new_alerts:
                        if gone_alert.klass == new_alert.klass and gone_alert.key == new_alert.key:
                            service_gone_alerts.remove(gone_alert)
                            service_new_alerts.remove(new_alert)
                            break

                if not service_gone_alerts and not service_new_alerts:
                    continue

                factory = ALERT_SERVICES_FACTORIES.get(
                    alert_service_desc["type"])
                if factory is None:
                    self.logger.error("Alert service %r does not exist",
                                      alert_service_desc["type"])
                    continue

                try:
                    alert_service = factory(self.middleware,
                                            alert_service_desc["attributes"])
                except Exception:
                    self.logger.error(
                        "Error creating alert service %r with parameters=%r",
                        alert_service_desc["type"],
                        alert_service_desc["attributes"],
                        exc_info=True)
                    continue

                alerts = [
                    alert for alert in service_alerts if not alert.dismissed
                ]
                service_gone_alerts = [
                    alert for alert in service_gone_alerts
                    if not alert.dismissed
                ]
                service_new_alerts = [
                    alert for alert in service_new_alerts
                    if not alert.dismissed
                ]

                if alerts or service_gone_alerts or service_new_alerts:
                    try:
                        await alert_service.send(alerts, service_gone_alerts,
                                                 service_new_alerts)
                    except Exception:
                        self.logger.error("Error in alert service %r",
                                          alert_service_desc["type"],
                                          exc_info=True)

            if policy_name == "IMMEDIATELY":
                as_ = AlertSerializer(self.middleware)
                for alert in gone_alerts:
                    if await as_.should_show_alert(alert):
                        self._send_alert_deleted_event(alert)
                for alert in new_alerts:
                    if await as_.should_show_alert(alert):
                        self.middleware.send_event(
                            "alert.list",
                            "ADDED",
                            id=alert.uuid,
                            fields=await as_.serialize(alert),
                        )

                for alert in new_alerts:
                    if alert.mail:
                        await self.middleware.call("mail.send", alert.mail)

                if await self.middleware.call("system.is_enterprise"):
                    new_hardware_alerts = [
                        alert for alert in new_alerts if alert.klass.hardware
                    ]
                    if new_hardware_alerts:
                        if await self.middleware.call(
                                "support.is_available_and_enabled"):
                            support = await self.middleware.call(
                                "support.config")
                            msg = [
                                f"* {alert.formatted}"
                                for alert in new_hardware_alerts
                            ]

                            serial = (
                                await
                                self.middleware.call("system.dmidecode_info")
                            )["system-serial-number"]

                            for name, verbose_name in await self.middleware.call(
                                    "support.fields"):
                                value = support[name]
                                if value:
                                    msg += [
                                        "",
                                        "{}: {}".format(verbose_name, value)
                                    ]

                            msg = "\n".join(msg)

                            job = await self.middleware.call(
                                "support.new_ticket", {
                                    "title": "Automatic alert (%s)" % serial,
                                    "body": msg,
                                    "attach_debug": False,
                                    "category": "Hardware",
                                    "criticality": "Loss of Functionality",
                                    "environment": "Production",
                                    "name": "Automatic Alert",
                                    "email": "*****@*****.**",
                                    "phone": "-",
                                })
                            await job.wait()
                            if job.error:
                                await self.middleware.call(
                                    "alert.oneshot_create",
                                    "AutomaticAlertFailed", {
                                        "serial": serial,
                                        "alert": msg,
                                        "error": str(job.error)
                                    })

    def __uuid(self):
        return str(uuid.uuid4())

    async def __should_run_or_send_alerts(self):
        if await self.middleware.call('system.state') != 'READY':
            return False

        if await self.middleware.call('failover.licensed'):
            status = await self.middleware.call('failover.status')
            if status == 'BACKUP' or await self.middleware.call(
                    'failover.in_progress'):
                return False

        return True

    async def __run_alerts(self):
        master_node = "A"
        backup_node = "B"
        product_type = await self.middleware.call("alert.product_type")
        run_on_backup_node = False
        run_failover_related = False
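        # On Enterprise HA systems, alerts are also collected from the standby node,
        # but only when it is reachable, READY, in BACKUP state and running the same
        # version; failover-related sources are skipped while such alerts are blocked.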
        if product_type == "ENTERPRISE":
            if await self.middleware.call("failover.licensed"):
                if await self.middleware.call("failover.node") == "B":
                    master_node = "B"
                    backup_node = "A"
                try:
                    remote_version = await self.middleware.call(
                        "failover.call_remote", "system.version")
                    remote_system_state = await self.middleware.call(
                        "failover.call_remote", "system.state")
                    remote_failover_status = await self.middleware.call(
                        "failover.call_remote", "failover.status")
                except Exception:
                    pass
                else:
                    if remote_version == await self.middleware.call(
                            "system.version"):
                        if remote_system_state == "READY" and remote_failover_status == "BACKUP":
                            run_on_backup_node = True

            run_failover_related = time.monotonic(
            ) > self.blocked_failover_alerts_until

        for k, source_lock in list(self.sources_locks.items()):
            if source_lock.expires_at <= time.monotonic():
                await self.unblock_source(k)

        for alert_source in ALERT_SOURCES.values():
            if product_type not in alert_source.products:
                continue

            if alert_source.failover_related and not run_failover_related:
                continue

            if not alert_source.schedule.should_run(
                    datetime.utcnow(),
                    self.alert_source_last_run[alert_source.name]):
                continue

            self.alert_source_last_run[alert_source.name] = datetime.utcnow()

            alerts_a = [
                alert for alert in self.alerts if alert.node == master_node
                and alert.source == alert_source.name
            ]
            locked = False
            if self.blocked_sources[alert_source.name]:
                self.logger.debug(
                    "Not running alert source %r because it is blocked",
                    alert_source.name)
                locked = True
            else:
                self.logger.trace("Running alert source: %r",
                                  alert_source.name)

                try:
                    alerts_a = await self.__run_source(alert_source.name)
                except UnavailableException:
                    pass
            for alert in alerts_a:
                alert.node = master_node

            alerts_b = []
            if run_on_backup_node and alert_source.run_on_backup_node:
                try:
                    alerts_b = [
                        alert for alert in self.alerts
                        if alert.node == backup_node
                        and alert.source == alert_source.name
                    ]
                    try:
                        if not locked:
                            alerts_b = await self.middleware.call(
                                "failover.call_remote", "alert.run_source",
                                [alert_source.name])

                            alerts_b = [
                                Alert(**dict(
                                    {
                                        k: v
                                        for k, v in alert.items() if k in [
                                            "args", "datetime",
                                            "last_occurrence", "dismissed",
                                            "mail"
                                        ]
                                    },
                                    klass=AlertClass.class_by_name[
                                        alert["klass"]],
                                    _source=alert["source"],
                                    _key=alert["key"])) for alert in alerts_b
                            ]
                    except CallError as e:
                        if e.errno in [
                                errno.ECONNABORTED, errno.ECONNREFUSED,
                                errno.ECONNRESET, errno.EHOSTDOWN,
                                errno.ETIMEDOUT,
                                CallError.EALERTCHECKERUNAVAILABLE
                        ]:
                            pass
                        else:
                            raise
                except ReserveFDException:
                    self.logger.debug('Failed to reserve a privileged port')
                except Exception:
                    alerts_b = [
                        Alert(AlertSourceRunFailedOnBackupNodeAlertClass,
                              args={
                                  "source_name": alert_source.name,
                                  "traceback": traceback.format_exc(),
                              },
                              _source=alert_source.name)
                    ]

            for alert in alerts_b:
                alert.node = backup_node

            for alert in alerts_a + alerts_b:
                self.__handle_alert(alert)

            self.alerts = (
                [a for a in self.alerts if a.source != alert_source.name] +
                alerts_a + alerts_b)

    def __handle_alert(self, alert):
        try:
            existing_alert = [
                a for a in self.alerts if (a.node, a.source, a.klass,
                                           a.key) == (alert.node, alert.source,
                                                      alert.klass, alert.key)
            ][0]
        except IndexError:
            existing_alert = None

        if existing_alert is None:
            alert.uuid = self.__uuid()
        else:
            alert.uuid = existing_alert.uuid
        if existing_alert is None:
            alert.datetime = alert.datetime or datetime.utcnow()
            if alert.datetime.tzinfo is not None:
                alert.datetime = alert.datetime.astimezone(
                    timezone.utc).replace(tzinfo=None)
        else:
            alert.datetime = existing_alert.datetime
        alert.last_occurrence = datetime.utcnow()
        if existing_alert is None:
            alert.dismissed = False
        else:
            alert.dismissed = existing_alert.dismissed

    def __expire_alerts(self):
        self.alerts = list(
            filter(lambda alert: not self.__should_expire_alert(alert),
                   self.alerts))

    def __should_expire_alert(self, alert):
        if issubclass(alert.klass, OneShotAlertClass):
            if alert.klass.expires_after is not None:
                return alert.last_occurrence < datetime.utcnow(
                ) - alert.klass.expires_after

        return False

    @private
    async def run_source(self, source_name):
        try:
            return [
                dict(alert.__dict__, klass=alert.klass.name)
                for alert in await self.__run_source(source_name)
            ]
        except UnavailableException:
            raise CallError("This alert checker is unavailable",
                            CallError.EALERTCHECKERUNAVAILABLE)

    @private
    async def block_source(self, source_name, timeout=3600):
        if source_name not in ALERT_SOURCES:
            raise CallError("Invalid alert source")

        lock = str(uuid.uuid4())
        self.blocked_sources[source_name].add(lock)
        self.sources_locks[lock] = AlertSourceLock(source_name,
                                                   time.monotonic() + timeout)
        return lock

    @private
    async def unblock_source(self, lock):
        source_lock = self.sources_locks.pop(lock, None)
        if source_lock:
            self.blocked_sources[source_lock.source_name].remove(lock)

    @private
    async def block_failover_alerts(self):
        # This value comes from Support's observations of how long an M-series boot can take.
        self.blocked_failover_alerts_until = time.monotonic() + 900

    async def __run_source(self, source_name):
        alert_source = ALERT_SOURCES[source_name]

        try:
            alerts = (await alert_source.check()) or []
        except UnavailableException:
            raise
        except Exception as e:
            if isinstance(e, CallError) and e.errno in [
                    errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET,
                    errno.EHOSTDOWN, errno.ETIMEDOUT
            ]:
                alerts = [
                    Alert(AlertSourceRunFailedAlertClass,
                          args={
                              "source_name": alert_source.name,
                              "traceback": str(e),
                          })
                ]
            else:
                alerts = [
                    Alert(AlertSourceRunFailedAlertClass,
                          args={
                              "source_name": alert_source.name,
                              "traceback": traceback.format_exc(),
                          })
                ]
        else:
            if not isinstance(alerts, list):
                alerts = [alerts]

        for alert in alerts:
            alert.source = source_name

        return alerts

    @periodic(3600, run_on_start=False)
    @private
    async def flush_alerts(self):
        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'BACKUP':
                return

        await self.middleware.call("datastore.delete", "system.alert", [])

        for alert in self.alerts:
            d = alert.__dict__.copy()
            d["klass"] = d["klass"].name
            del d["mail"]
            await self.middleware.call("datastore.insert", "system.alert", d)

    @private
    @accepts(Str("klass"), Any("args", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_create(self, job, klass, args):
        try:
            klass = AlertClass.class_by_name[klass]
        except KeyError:
            raise CallError(f"Invalid alert class: {klass!r}")

        if not issubclass(klass, OneShotAlertClass):
            raise CallError(
                f"Alert class {klass!r} is not a one-shot alert class")

        alert = await klass(self.middleware).create(args)
        if alert is None:
            return

        alert.source = ""
        alert.klass = alert.klass

        alert.node = self.node

        self.__handle_alert(alert)

        self.alerts = [a
                       for a in self.alerts if a.uuid != alert.uuid] + [alert]

        await self.middleware.call("alert.send_alerts")

    @private
    @accepts(Str("klass"), Any("query", null=True))
    @job(lock="process_alerts", transient=True)
    async def oneshot_delete(self, job, klass, query):
        try:
            klass = AlertClass.class_by_name[klass]
        except KeyError:
            raise CallError(f"Invalid alert source: {klass!r}")

        if not issubclass(klass, OneShotAlertClass):
            raise CallError(
                f"Alert class {klass!r} is not a one-shot alert source")

        related_alerts, unrelated_alerts = bisect(
            lambda a: (a.node, a.klass) == (self.node, klass), self.alerts)
        left_alerts = await klass(self.middleware
                                  ).delete(related_alerts, query)
        deleted = False
        for deleted_alert in related_alerts:
            if deleted_alert not in left_alerts:
                self.alerts.remove(deleted_alert)
                deleted = True

        if deleted:
            await self.middleware.call("alert.send_alerts")

    @private
    def alert_source_clear_run(self, name):
        alert_source = ALERT_SOURCES.get(name)
        if not alert_source:
            raise CallError(f"Alert source {name!r} not found.", errno.ENOENT)

        self.alert_source_last_run[alert_source.name] = datetime.min

    @private
    async def product_type(self):
        return await self.middleware.call("system.product_type")
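
A brief usage sketch (assumed, not part of the original listing; the wrapper names are illustrative): other plugins raise and clear one-shot alerts through the calls below, mirroring the oneshot_create usage shown in send_alerts() above.

# Hypothetical helpers, for illustration only.
async def flag_ticket_failure(middleware, serial, body, error):
    # Creates (or refreshes) a one-shot alert of the given class for this node.
    await middleware.call('alert.oneshot_create', 'AutomaticAlertFailed', {
        'serial': serial, 'alert': body, 'error': error,
    })

async def clear_ticket_failure(middleware):
    # Asks the alert class to delete matching alerts; the meaning of `query`
    # depends on the class's delete() implementation (None here).
    await middleware.call('alert.oneshot_delete', 'AutomaticAlertFailed', None)
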
Example #9
File: bootenv.py Project: yaplej/freenas
class BootEnvService(CRUDService):

    class Config:
        datastore_primary_key_type = 'string'
        cli_namespace = 'system.bootenv'

    BE_TOOL = 'zectl' if osc.IS_LINUX else 'beadm'
    ENTRY = Dict(
        'bootenv_entry',
        Str('id'),
        Str('realname'),
        Str('name'),
        Str('active'),
        Bool('activated'),
        Bool('can_activate'),
        Str('mountpoint'),
        Str('space'),
        Datetime('created'),
        Bool('keep'),
        Int('rawspace'),
        additional_attrs=True
    )

    @filterable
    def query(self, filters, options):
        """
        Query all Boot Environments with `query-filters` and `query-options`.
        """
        results = []

        cp = subprocess.run([self.BE_TOOL, 'list', '-H'], capture_output=True, text=True)
        datasets_origins = [
            d['properties']['origin']['parsed']
            for d in self.middleware.call_sync('zfs.dataset.query', [], {'extra': {'properties': ['origin']}})
        ]
        boot_pool = self.middleware.call_sync('boot.pool_name')
        for line in cp.stdout.strip().split('\n'):
            fields = line.split('\t')
            name = fields[0]
            if len(fields) > 5 and fields[5] != '-':
                name = fields[5]
            be = {
                'id': name,
                'realname': fields[0],
                'name': name,
                'active': fields[1],
                'activated': 'n' in fields[1].lower(),
                'can_activate': False,
                'mountpoint': fields[2],
                'space': None if osc.IS_LINUX else fields[3],
                'created': datetime.strptime(fields[3 if osc.IS_LINUX else 4], '%Y-%m-%d %H:%M'),
                'keep': False,
                'rawspace': None
            }

            ds = self.middleware.call_sync('zfs.dataset.query', [
                ('id', '=', rf'{boot_pool}/ROOT/{fields[0]}'),
            ], {'extra': {'snapshots': True}})
            if ds:
                ds = ds[0]
                snapshot = None
                origin = ds['properties']['origin']['parsed']
                if '@' in origin:
                    snapshot = self.middleware.call_sync('zfs.snapshot.query', [('id', '=', origin)])
                    if snapshot:
                        snapshot = snapshot[0]
                if f'{self.BE_TOOL}:keep' in ds['properties']:
                    if ds['properties'][f'{self.BE_TOOL}:keep']['value'] == 'True':
                        be['keep'] = True
                    elif ds['properties'][f'{self.BE_TOOL}:keep']['value'] == 'False':
                        be['keep'] = False

                # When a BE is deleted, the following actions happen:
                # 1) Its descendants (if any) are promoted once
                # 2) The BE is deleted
                # 3) Filesystems dependent on the BE's origin are promoted
                # 4) The origin is deleted
                #
                # We would like to find out how much space will be freed when a BE is removed.
                # We classify a BE as one of 2 types:
                # 1) BE without descendants
                # 2) BE with descendants
                #
                # For (1), the space freed is the "usedbydataset" property plus the space freed by its "origin".
                # For (2), the space freed is the "usedbydataset" property plus the space freed by its "origin", but the
                # latter cannot be accurately determined because all the descendants are promoted once for this BE and
                # the origin of the current BE would ultimately be determined by the last descendant promoted. So we
                # ignore it for now and rely only on the space it is currently consuming as a best-effort prediction.
                # There is also the "usedbysnapshots" property; for that we retrieve all snapshots of the dataset and,
                # if any of them do not have a cloned dataset, that space will also be freed when we delete
                # this dataset. We also factor in the space consumed by children.

                be['rawspace'] = ds['properties']['usedbydataset']['parsed'] + ds[
                    'properties']['usedbychildren']['parsed']

                children = False
                for snap in ds['snapshots']:
                    if snap['name'] not in datasets_origins:
                        be['rawspace'] += self.middleware.call_sync(
                            'zfs.snapshot.get_instance', snap['name'], {'extra': {'properties': ['used']}}
                        )['properties']['used']['parsed']
                    else:
                        children = True

                if snapshot and not children:
                    # This indicates the current BE is a leaf and it is safe to add the BE's origin
                    # space to the space freed when it is deleted.
                    be['rawspace'] += snapshot['properties']['used']['parsed']

                if be['rawspace'] < 1024:
                    be['space'] = f'{be["rawspace"]}B'
                elif 1024 <= be['rawspace'] < 1048576:
                    be['space'] = f'{be["rawspace"] / 1024}K'
                elif 1048576 <= be['rawspace'] < 1073741824:
                    be['space'] = f'{be["rawspace"] / 1048576}M'
                elif 1073741824 <= be['rawspace'] < 1099511627776:
                    be['space'] = f'{be["rawspace"] / 1073741824}G'
                elif 1099511627776 <= be['rawspace'] < 1125899906842624:
                    be['space'] = f'{be["rawspace"] / 1099511627776}T'
                elif 1125899906842624 <= be['rawspace'] < 1152921504606846976:
                    be['space'] = f'{be["rawspace"] / 1125899906842624}P'
                elif 1152921504606846976 <= be['rawspace'] < 1180591620717411303424:
                    be['space'] = f'{be["rawspace"] / 1152921504606846976}E'
                else:
                    be['space'] = f'{be["rawspace"] / 1180591620717411303424}Z'

                be['space'] = f'{round(float(be["space"][:-1]), 2)}{be["space"][-1]}'

                if osc.IS_FREEBSD:
                    be['can_activate'] = 'truenas:kernel_version' not in ds['properties']
                if osc.IS_LINUX:
                    be['can_activate'] = (
                        'truenas:kernel_version' in ds['properties'] or
                        'truenas:12' in ds['properties']
                    )

            results.append(be)
        return filter_list(results, filters, options)

    @item_method
    @accepts(Str('id'))
    @returns(Bool('successfully_activated'))
    def activate(self, oid):
        """
        Activates boot environment `id`.
        """
        be = self.middleware.call_sync('bootenv.query', [['id', '=', oid]], {'get': True})
        if not be['can_activate']:
            raise CallError('This BE cannot be activated')

        try:
            subprocess.run([self.BE_TOOL, 'activate', oid], capture_output=True, text=True, check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to activate BE: {cpe.stdout.strip()}')
        else:
            return True

    @item_method
    @accepts(
        Str('id'),
        Dict(
            'attributes',
            Bool('keep', default=False),
        )
    )
    @returns(Bool('successfully_set_attribute'))
    async def set_attribute(self, oid, attrs):
        """
        Sets attributes for boot environment `id`.

        Currently only the `keep` attribute is allowed.
        """
        boot_pool = await self.middleware.call('boot.pool_name')
        boot_env = await self.get_instance(oid)
        dsname = f'{boot_pool}/ROOT/{boot_env["realname"]}'
        ds = await self.middleware.call('zfs.dataset.query', [('id', '=', dsname)])
        if not ds:
            raise CallError(f'BE {oid!r} does not exist.', errno.ENOENT)
        await self.middleware.call('zfs.dataset.update', dsname, {
            'properties': {f'{self.BE_TOOL}:keep': {'value': str(attrs['keep'])}},
        })
        return True

    @accepts(Dict(
        'bootenv_create',
        Str('name', required=True, validators=[Match(RE_BE_NAME)]),
        Str('source'),
    ))
    @returns(Str('bootenv_name'))
    async def do_create(self, data):
        """
        Create a new boot environment using `name`.

        To create a new boot environment that is a clone of another boot environment, pass `source`; the new boot
        environment `name` is then created by cloning `source`.

        Ensure that `name` and `source` are valid boot environment names.
        """
        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_create', data['name'])
        verrors.check()

        args = [self.BE_TOOL, 'create']
        source = data.get('source')
        if source:
            args += [
                '-e', os.path.join(
                    await self.middleware.call('boot.pool_name'), 'ROOT', source
                ) if osc.IS_LINUX else source
            ]
        args.append(data['name'])
        try:
            await run(args, encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to create boot environment: {cpe.stdout}')
        return data['name']
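
    # Illustrative usage of the create call documented above (hypothetical client call;
    # the BE names are examples only):
    #     await middleware.call('bootenv.create', {'name': 'pre-upgrade', 'source': 'default'})
    # creates a new BE named "pre-upgrade" cloned from the existing BE "default".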

    @accepts(Str('id'), Dict(
        'bootenv_update',
        Str('name', required=True, validators=[Match(RE_BE_NAME)]),
    ))
    @returns(Str('bootenv_name'))
    async def do_update(self, oid, data):
        """
        Update `id` boot environment name with a new provided valid `name`.
        """
        await self._get_instance(oid)

        verrors = ValidationErrors()
        await self._clean_be_name(verrors, 'bootenv_update', data['name'])
        verrors.check()

        try:
            await run(self.BE_TOOL, 'rename', oid, data['name'], encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to update boot environment: {cpe.stderr}')
        return data['name']

    async def _clean_be_name(self, verrors, schema, name):
        beadm_names = (await (await Popen(
            f"{self.BE_TOOL} list -H | awk '{{print ${1 if osc.IS_LINUX else 7}}}'",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )).communicate())[0].decode().split('\n')
        if name in filter(None, beadm_names):
            verrors.add(f'{schema}.name', f'The name "{name}" already exists', errno.EEXIST)

    @accepts(Str('id'))
    @job(lock=lambda args: f'bootenv_delete_{args[0]}')
    async def do_delete(self, job, oid):
        """
        Delete `id` boot environment. This removes the clone from the system.
        """
        be = await self._get_instance(oid)
        try:
            await run(self.BE_TOOL, 'destroy', '-F', be['id'], encoding='utf8', check=True)
        except subprocess.CalledProcessError as cpe:
            raise CallError(f'Failed to delete boot environment: {cpe.stdout}')
        return True
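
# A compact sketch of the byte-humanization cascade used in the query method above.
# It is not part of the original service; it only illustrates the same 1024-based unit
# boundaries (B, K, M, ..., Z) as a loop instead of a chain of elif branches.
def _humanize_bytes_sketch(rawspace):
    for suffix in ('B', 'K', 'M', 'G', 'T', 'P', 'E'):
        if rawspace < 1024:
            return f'{round(rawspace, 2)}{suffix}'
        rawspace /= 1024
    return f'{round(rawspace, 2)}Z'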
Example #10
class DiskService(CRUDService):
    class Config:
        datastore = 'storage.disk'
        datastore_prefix = 'disk_'
        datastore_extend = 'disk.disk_extend'
        datastore_extend_context = 'disk.disk_extend_context'
        datastore_primary_key = 'identifier'
        datastore_primary_key_type = 'string'
        event_register = False
        event_send = False
        cli_namespace = 'storage.disk'

    ENTRY = Dict(
        'disk_entry',
        Str('identifier', required=True),
        Str('name', required=True),
        Str('subsystem', required=True),
        Int('number', required=True),
        Str('serial', required=True),
        Str('lunid', required=True, null=True),
        Int('size', required=True),
        Str('description', required=True),
        Str('transfermode', required=True),
        Str('hddstandby',
            required=True,
            enum=[
                'ALWAYS ON', '5', '10', '20', '30', '60', '120', '180', '240',
                '300', '330'
            ]),
        Bool('togglesmart', required=True),
        Str('advpowermgmt',
            required=True,
            enum=['DISABLED', '1', '64', '127', '128', '192', '254']),
        Str('smartoptions', required=True),
        Datetime('expiretime', required=True, null=True),
        Int('critical', required=True, null=True),
        Int('difference', required=True, null=True),
        Int('informational', required=True, null=True),
        Str('model', required=True, null=True),
        Int('rotationrate', required=True, null=True),
        Str('type', required=True, null=True),
        Str('zfs_guid', required=True, null=True),
        Str('bus', required=True),
        Str('devname', required=True),
        Dict('enclosure', Int('number'), Int('slot'), null=True,
             required=True),
        Str('pool', null=True, required=True),
        Str('passwd', private=True),
        Str('kmip_uid', null=True),
    )

    @filterable
    async def query(self, filters, options):
        """
        Query disks.

        The following extra options are supported:

             include_expired: true - will also include expired disks (default: false)
             passwords: true - will not hide KMIP password for the disks (default: false)
             pools: true - will join pool name for each disk (default: false)
        """
        filters = filters or []
        options = options or {}
        if not options.get('extra', {}).get('include_expired', False):
            filters += [('expiretime', '=', None)]

        return await super().query(filters, options)
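
    # Illustrative usage of the extra options documented above (hypothetical client call):
    #     await middleware.call('disk.query', [], {'extra': {'pools': True, 'include_expired': True}})
    # joins the pool name for each disk and also includes disks that have already expired.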

    @private
    async def disk_extend(self, disk, context):
        disk.pop('enabled', None)
        for key in ['advpowermgmt', 'hddstandby']:
            disk[key] = disk[key].upper()
        try:
            disk['size'] = int(disk['size'])
        except ValueError:
            disk['size'] = None

        disk['devname'] = disk['name']
        self._expand_enclosure(disk)
        if context['passwords']:
            if not disk['passwd']:
                disk['passwd'] = context['disks_keys'].get(
                    disk['identifier'], '')
        else:
            disk.pop('passwd')
            disk.pop('kmip_uid')
        if disk['name'] in context['boot_pool_disks']:
            disk['pool'] = context['boot_pool_name']
        else:
            disk['pool'] = context['zfs_guid_to_pool'].get(disk['zfs_guid'])
        return disk

    @private
    async def disk_extend_context(self, rows, extra):
        context = {
            'passwords': extra.get('passwords', False),
            'disks_keys': {},
            'pools': extra.get('pools', False),
            'boot_pool_disks': [],
            'boot_pool_name': None,
            'zfs_guid_to_pool': {},
        }

        if context['passwords']:
            context['disks_keys'] = await self.middleware.call(
                'kmip.retrieve_sed_disks_keys')

        if context['pools']:
            context['boot_pool_disks'] = await self.middleware.call(
                'boot.get_disks')
            context['boot_pool_name'] = await self.middleware.call(
                'boot.pool_name')

            for pool in await self.middleware.call('zfs.pool.query'):
                topology = await self.middleware.call(
                    'pool.transform_topology_lightweight', pool['groups'])
                for vdev in await self.middleware.call('pool.flatten_topology',
                                                       topology):
                    if vdev['type'] == 'DISK':
                        context['zfs_guid_to_pool'][
                            vdev['guid']] = pool['name']

        return context

    def _expand_enclosure(self, disk):
        if disk['enclosure_slot'] is not None:
            disk['enclosure'] = {
                'number': disk['enclosure_slot'] // 1000,
                'slot': disk['enclosure_slot'] % 1000
            }
        else:
            disk['enclosure'] = None
        del disk['enclosure_slot']

    def _compress_enclosure(self, disk):
        if disk['enclosure'] is not None:
            disk['enclosure_slot'] = disk['enclosure']['number'] * 1000 + disk[
                'enclosure']['slot']
        else:
            disk['enclosure_slot'] = None
        del disk['enclosure']
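
    # Example of the enclosure_slot encoding used by the two helpers above:
    # enclosure number 2, slot 5 is stored as 2 * 1000 + 5 = 2005, and expanded back
    # via 2005 // 1000 == 2 and 2005 % 1000 == 5.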

    @accepts(Str('id'),
             Patch(
                 'disk_entry',
                 'disk_update',
                 ('rm', {
                     'name': 'identifier'
                 }),
                 ('rm', {
                     'name': 'name'
                 }),
                 ('rm', {
                     'name': 'subsystem'
                 }),
                 ('rm', {
                     'name': 'serial'
                 }),
                 ('rm', {
                     'name': 'kmip_uid'
                 }),
                 ('rm', {
                     'name': 'size'
                 }),
                 ('rm', {
                     'name': 'transfermode'
                 }),
                 ('rm', {
                     'name': 'expiretime'
                 }),
                 ('rm', {
                     'name': 'model'
                 }),
                 ('rm', {
                     'name': 'rotationrate'
                 }),
                 ('rm', {
                     'name': 'type'
                 }),
                 ('rm', {
                     'name': 'zfs_guid'
                 }),
                 ('rm', {
                     'name': 'devname'
                 }),
                 ('attr', {
                     'update': True
                 }),
             ))
    async def do_update(self, id, data):
        """
        Update disk of `id`.

        Extra options for SMART which we don't already support can be passed via `smartoptions`.

        `critical`, `informational` and `difference` are integer values used to configure SMART alerts that fire
        when the disk temperature crosses the assigned threshold for the respective attribute.
        If they are set to null, then SMARTD config values are used as defaults.

        An email of log level LOG_CRIT is issued when the disk temperature crosses `critical`.

        An email of log level LOG_INFO is issued when the disk temperature crosses `informational`.

        If the temperature of a disk changes by `difference` degrees Celsius since the last report, SMART reports it.
        """

        old = await self.middleware.call(
            'datastore.query', 'storage.disk', [['identifier', '=', id]], {
                'get': True,
                'prefix': self._config.datastore_prefix
            })
        old.pop('enabled', None)
        self._expand_enclosure(old)
        new = old.copy()
        new.update(data)
        if not new['passwd'] and old['passwd'] != new['passwd']:
            # We want to make sure kmip uid is None in this case
            if new['kmip_uid']:
                asyncio.ensure_future(
                    self.middleware.call('kmip.reset_sed_disk_password', id,
                                         new['kmip_uid']))
            new['kmip_uid'] = None

        for key in ['advpowermgmt', 'hddstandby']:
            new[key] = new[key].title()

        self._compress_enclosure(new)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, new,
                                   {'prefix': self._config.datastore_prefix})

        if any(new[key] != old[key] for key in ['hddstandby', 'advpowermgmt']):
            await self.middleware.call('disk.power_management', new['name'])

        if any(new[key] != old[key] for key in [
                'togglesmart', 'smartoptions', 'hddstandby', 'critical',
                'difference', 'informational'
        ]):
            if new['togglesmart']:
                await self.middleware.call('disk.toggle_smart_on', new['name'])
            else:
                await self.middleware.call('disk.toggle_smart_off',
                                           new['name'])

            await self.middleware.call('disk.update_smartctl_args_for_disks')
            await self.middleware.call('service.restart', 'collectd')
            await self._service_change('smartd', 'restart')
            await self._service_change('snmp', 'restart')

        if new['passwd'] and old['passwd'] != new['passwd']:
            await self.middleware.call('kmip.sync_sed_keys', [id])

        return await self.query([['identifier', '=', id]], {'get': True})
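
    # Illustrative usage of the update call documented above (hypothetical identifier and values):
    #     await middleware.call('disk.update', '{serial}WD-XXXX', {
    #         'hddstandby': '10',
    #         'critical': 50,       # LOG_CRIT email above 50 degrees Celsius
    #         'informational': 40,  # LOG_INFO email above 40 degrees Celsius
    #         'difference': 5,      # report temperature changes of 5 degrees Celsius or more
    #     })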

    @private
    async def copy_settings(self, old, new):
        await self.middleware.call(
            'disk.update', new['identifier'], {
                k: v
                for k, v in old.items() if k in [
                    'togglesmart',
                    'advpowermgmt',
                    'description',
                    'hddstandby',
                    'smartoptions',
                    'critical',
                    'difference',
                    'informational',
                ]
            })

        changed = False
        for row in await self.middleware.call(
                'datastore.query', 'tasks.smarttest_smarttest_disks', [
                    ['disk_id', '=', old['identifier']],
                ], {'relationships': False}):
            try:
                await self.middleware.call(
                    'datastore.insert', 'tasks.smarttest_smarttest_disks', {
                        'smarttest_id': row['smarttest_id'],
                        'disk_id': new['identifier'],
                    })
            except IntegrityError:
                pass
            else:
                changed = True

        if changed:
            asyncio.ensure_future(self._service_change('smartd', 'restart'))

    @private
    async def check_clean(self, disk):
        return not bool(await self.middleware.call('disk.list_partitions',
                                                   disk))

    @private
    async def sed_unlock_all(self):
        # on an HA system, if both controllers manage to send
        # SED commands at the same time, then it can cause issues
        # where, ultimately, the disks don't get unlocked
        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'BACKUP':
                return

        advconfig = await self.middleware.call('system.advanced.config')
        disks = await self.middleware.call('disk.query', [],
                                           {'extra': {
                                               'passwords': True
                                           }})

        # If no SED password was found we can stop here
        if not await self.middleware.call('system.advanced.sed_global_password'
                                          ) and not any(
                                              [d['passwd'] for d in disks]):
            return

        result = await asyncio_map(
            lambda disk: self.sed_unlock(disk['name'], disk, advconfig), disks,
            16)
        locked = list(filter(lambda x: x['locked'] is True, result))
        if locked:
            disk_names = ', '.join([i['name'] for i in locked])
            self.logger.warn(
                f'Failed to unlock following SED disks: {disk_names}')
            raise CallError('Failed to unlock SED disks', errno.EACCES)
        return True

    @private
    async def sed_unlock(self, disk_name, disk=None, _advconfig=None):
        # on an HA system, if both controllers manage to send
        # SED commands at the same time, then it can cause issues
        # where, ultimately, the disks don't get unlocked
        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'BACKUP':
                return

        if _advconfig is None:
            _advconfig = await self.middleware.call('system.advanced.config')

        devname = f'/dev/{disk_name}'
        # We need two states to tell whether the disk was successfully unlocked
        locked = None
        unlocked = None
        password = await self.middleware.call(
            'system.advanced.sed_global_password')

        if disk is None:
            disk = await self.query([('name', '=', disk_name)],
                                    {'extra': {
                                        'passwords': True
                                    }})
            if disk and disk[0]['passwd']:
                password = disk[0]['passwd']
        elif disk.get('passwd'):
            password = disk['passwd']

        rv = {'name': disk_name, 'locked': None}

        if not password:
            # If there is no password no point in continuing
            return rv

        # Try unlocking TCG OPAL using sedutil
        cp = await run('sedutil-cli', '--query', devname, check=False)
        if cp.returncode == 0:
            output = cp.stdout.decode(errors='ignore')
            if 'Locked = Y' in output:
                locked = True
                cp = await run('sedutil-cli',
                               '--setLockingRange',
                               '0',
                               'RW',
                               password,
                               devname,
                               check=False)
                if cp.returncode == 0:
                    locked = False
                    unlocked = True
                    # If we were able to unlock it, let's set mbrenable to off
                    cp = await run('sedutil-cli',
                                   '--setMBREnable',
                                   'off',
                                   password,
                                   devname,
                                   check=False)
                    if cp.returncode:
                        self.logger.error(
                            'Failed to set MBREnable for %r to "off": %s',
                            devname,
                            cp.stderr.decode(),
                            exc_info=True)

            elif 'Locked = N' in output:
                locked = False

        # Try ATA Security if the SED was not unlocked and it's not locked by OPAL
        if not unlocked and not locked:
            locked, unlocked = await self.middleware.call(
                'disk.unlock_ata_security', devname, _advconfig, password)

        if locked:
            self.logger.error(f'Failed to unlock {disk_name}')

        rv['locked'] = locked
        return rv

    @private
    async def sed_initial_setup(self, disk_name, password):
        """
        NO_SED - Does not support SED
        ACCESS_GRANTED - Already setup and `password` is a valid password
        LOCKING_DISABLED - Locking range is disabled
        SETUP_FAILED - Initial setup call failed
        SUCCESS - Setup successfully completed
        """
        # on an HA system, if both controllers manage to send
        # SED commands at the same time, then it can cause issues
        # where, ultimately, the disks don't get unlocked
        if await self.middleware.call('failover.licensed'):
            if await self.middleware.call('failover.status') == 'BACKUP':
                return

        devname = f'/dev/{disk_name}'
        cp = await run('sedutil-cli', '--isValidSED', devname, check=False)
        if b' SED ' not in cp.stdout:
            return 'NO_SED'

        cp = await run('sedutil-cli',
                       '--listLockingRange',
                       '0',
                       password,
                       devname,
                       check=False)
        if cp.returncode == 0:
            output = cp.stdout.decode()
            if RE_SED_RDLOCK_EN.search(output) and RE_SED_WRLOCK_EN.search(
                    output):
                return 'ACCESS_GRANTED'
            else:
                return 'LOCKING_DISABLED'

        try:
            await run('sedutil-cli', '--initialSetup', password, devname)
        except subprocess.CalledProcessError as e:
            self.logger.debug(
                f'initialSetup failed for {disk_name}:\n{e.stdout}{e.stderr}')
            return 'SETUP_FAILED'

        # OPAL 2.0 disks do not enable locking range on setup like Enterprise does
        try:
            await run('sedutil-cli', '--enableLockingRange', '0', password,
                      devname)
        except subprocess.CalledProcessError as e:
            self.logger.debug(
                f'enableLockingRange failed for {disk_name}:\n{e.stdout}{e.stderr}'
            )
            return 'SETUP_FAILED'

        return 'SUCCESS'
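
    # Illustrative handling of the states documented above (hypothetical caller; names are examples only):
    #     status = await middleware.call('disk.sed_initial_setup', 'sda', password)
    #     if status in ('SUCCESS', 'ACCESS_GRANTED'):
    #         ...  # disk is usable with this password
    #     elif status == 'NO_SED':
    #         ...  # skip, drive has no SED support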

    @private
    async def configure_power_management(self):
        """
        This runs on boot to properly configure all power management options
        (Advanced Power Management and IDLE) for all disks.
        """
        if await self.middleware.call('system.product_type'
                                      ) != 'SCALE_ENTERPRISE':
            for disk in await self.middleware.call('disk.query'):
                await self.middleware.call('disk.power_management',
                                           disk['name'], disk)

    @private
    async def power_management(self, dev, disk=None):
        """
        Actually sets power management for `dev`.
        `disk` is the disk.query entry; it is optional so this can be called with only the disk name.
        """

        if not disk:
            disk = await self.middleware.call('disk.query',
                                              [('name', '=', dev)])
            if not disk:
                return
            disk = disk[0]

        return await self.middleware.call('disk.power_management_impl', dev,
                                          disk)
Example #11
class SystemGeneralService(ConfigService):

    class Config:
        namespace = 'system.general'
        datastore = 'system.settings'
        datastore_prefix = 'stg_'
        datastore_extend = 'system.general.general_system_extend'
        cli_namespace = 'system.general'

    ENTRY = Dict(
        'system_general_entry',
        Patch(
            'certificate_entry', 'ui_certificate',
            ('attr', {'null': True, 'required': True}),
        ),
        Int('ui_httpsport', validators=[Range(min=1, max=65535)], required=True),
        Bool('ui_httpsredirect', required=True),
        List(
            'ui_httpsprotocols', items=[Str('protocol', enum=HTTPS_PROTOCOLS)],
            empty=False, unique=True, required=True
        ),
        Int('ui_port', validators=[Range(min=1, max=65535)], required=True),
        List('ui_address', items=[IPAddr('addr')], empty=False, required=True),
        List('ui_v6address', items=[IPAddr('addr')], empty=False, required=True),
        List('ui_allowlist', items=[IPAddr('addr', network=True, network_strict=True)], required=True),
        Bool('ui_consolemsg', required=True),
        Str('ui_x_frame_options', enum=['SAMEORIGIN', 'DENY', 'ALLOW_ALL'], required=True),
        Str('kbdmap', required=True),
        Str('language', empty=False, required=True),
        Str('timezone', empty=False, required=True),
        Bool('crash_reporting', null=True, required=True),
        Bool('usage_collection', null=True, required=True),
        Datetime('birthday', required=True),
        Bool('wizardshown', required=True),
        Bool('crash_reporting_is_set', required=True),
        Bool('usage_collection_is_set', required=True),
        Int('id', required=True),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._original_datastore = {}
        self._rollback_timer = None

    @private
    async def general_system_extend(self, data):
        for key in list(data.keys()):
            if key.startswith('gui'):
                data['ui_' + key[3:]] = data.pop(key)

        if data['ui_certificate']:
            data['ui_certificate'] = await self.middleware.call(
                'certificate.get_instance', data['ui_certificate']['id']
            )

        data['crash_reporting_is_set'] = data['crash_reporting'] is not None
        if data['crash_reporting'] is None:
            data['crash_reporting'] = True

        data['usage_collection_is_set'] = data['usage_collection'] is not None
        if data['usage_collection'] is None:
            data['usage_collection'] = True

        data.pop('pwenc_check')

        return data

    @private
    async def validate_general_settings(self, data, schema):
        verrors = ValidationErrors()

        language = data.get('language')
        system_languages = await self.middleware.call('system.general.language_choices')
        if language not in system_languages.keys():
            verrors.add(
                f'{schema}.language',
                f'Specified "{language}" language unknown. Please select a valid language.'
            )

        if data['kbdmap'] not in await self.middleware.call('system.general.kbdmap_choices'):
            verrors.add(
                f'{schema}.kbdmap',
                'Please enter a valid keyboard layout'
            )

        timezone = data.get('timezone')
        timezones = await self.middleware.call('system.general.timezone_choices')
        if timezone not in timezones:
            verrors.add(
                f'{schema}.timezone',
                'Timezone not known. Please select a valid timezone.'
            )

        ip4_addresses_list = await self.middleware.call('system.general.ui_address_choices')
        ip6_addresses_list = await self.middleware.call('system.general.ui_v6address_choices')

        ip4_addresses = data.get('ui_address')
        for ip4_address in ip4_addresses:
            if ip4_address not in ip4_addresses_list:
                verrors.add(
                    f'{schema}.ui_address',
                    f'{ip4_address} ipv4 address is not associated with this machine'
                )

        ip6_addresses = data.get('ui_v6address')
        for ip6_address in ip6_addresses:
            if ip6_address not in ip6_addresses_list:
                verrors.add(
                    f'{schema}.ui_v6address',
                    f'{ip6_address} ipv6 address is not associated with this machine'
                )

        for key, wildcard, ips in [('ui_address', '0.0.0.0', ip4_addresses), ('ui_v6address', '::', ip6_addresses)]:
            if wildcard in ips and len(ips) > 1:
                verrors.add(
                    f'{schema}.{key}',
                    f'When "{wildcard}" has been selected, selection of other addresses is not allowed'
                )

        certificate_id = data.get('ui_certificate')
        cert = await self.middleware.call(
            'certificate.query',
            [["id", "=", certificate_id]]
        )
        if not cert:
            verrors.add(
                f'{schema}.ui_certificate',
                'Please specify a valid certificate which exists in the system'
            )
        else:
            cert = cert[0]
            verrors.extend(
                await self.middleware.call(
                    'certificate.cert_services_validation', certificate_id, f'{schema}.ui_certificate', False
                )
            )

            if cert['fingerprint']:
                syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_USER)
                syslog.syslog(syslog.LOG_ERR, 'Fingerprint of the certificate used in UI : ' + cert['fingerprint'])
                syslog.closelog()

        return verrors

    @accepts(
        Patch(
            'system_general_entry', 'general_settings',
            ('rm', {'name': 'crash_reporting_is_set'}),
            ('rm', {'name': 'usage_collection_is_set'}),
            ('rm', {'name': 'wizardshown'}),
            ('rm', {'name': 'id'}),
            ('replace', Int('ui_certificate', null=True)),
            ('add', Int('rollback_timeout', null=True)),
            ('add', Int('ui_restart_delay', null=True)),
            ('attr', {'update': True}),
        ),
    )
    async def do_update(self, data):
        """
        Update System General Service Configuration.

        `ui_certificate` is used to enable HTTPS access to the system. If `ui_certificate` is not configured on boot,
        it is automatically created by the system.

        `ui_httpsredirect` when set, makes sure that all HTTP requests are converted to HTTPS requests to better
        enhance security.

        `ui_address` and `ui_v6address` are a list of valid ipv4/ipv6 addresses respectively which the system will
        listen on.

        `ui_allowlist` is a list of IP addresses and networks that are allowed to use the API and UI. If this list is
        empty, then all IP addresses are allowed to use the API and UI.

        UI configuration is not applied automatically. Call `system.general.ui_restart` to apply new UI settings (all
        HTTP connections will be aborted) or specify `ui_restart_delay` (in seconds) to apply them automatically after
        a short delay, giving you time to receive the response to your settings update request.

        If an incorrect UI configuration is applied, you might lose API connectivity and be unable to fix the settings.
        To avoid that, specify `rollback_timeout` (in seconds). The UI configuration will automatically be rolled back
        to the previously working settings after `rollback_timeout` passes, unless you call `system.general.checkin`
        to confirm that the new settings are correct and no rollback is necessary.
        """
        rollback_timeout = data.pop('rollback_timeout', None)
        ui_restart_delay = data.pop('ui_restart_delay', None)

        original_datastore = await self.middleware.call('datastore.config', self._config.datastore)
        original_datastore['stg_guicertificate'] = (
            original_datastore['stg_guicertificate']['id']
            if original_datastore['stg_guicertificate']
            else None
        )

        config = await self.config()
        config['ui_certificate'] = config['ui_certificate']['id'] if config['ui_certificate'] else None
        if not config.pop('crash_reporting_is_set'):
            config['crash_reporting'] = None
        if not config.pop('usage_collection_is_set'):
            config['usage_collection'] = None
        new_config = config.copy()
        new_config.update(data)

        verrors = await self.validate_general_settings(new_config, 'general_settings_update')
        if verrors:
            raise verrors

        keys = new_config.keys()
        for key in list(keys):
            if key.startswith('ui_'):
                new_config['gui' + key[3:]] = new_config.pop(key)

        await self.middleware.call(
            'datastore.update',
            self._config.datastore,
            config['id'],
            new_config,
            {'prefix': 'stg_'}
        )

        if config['kbdmap'] != new_config['kbdmap']:
            await self.middleware.call('etc.generate', 'keyboard')
            await run(['setupcon'], check=False)
            await run(['localectl', 'set-keymap', new_config['kbdmap']], check=False)

        if config['timezone'] != new_config['timezone']:
            await self.middleware.call('zettarepl.update_config', {'timezone': new_config['timezone']})
            await self.middleware.call('service.reload', 'timeservices')
            await self.middleware.call('service.restart', 'cron')

        if config['language'] != new_config['language']:
            await self.middleware.call('system.general.set_language')

        if config['crash_reporting'] != new_config['crash_reporting']:
            await self.middleware.call('system.general.set_crash_reporting')

        await self.middleware.call('service.start', 'ssl')

        if rollback_timeout is not None:
            self._original_datastore = original_datastore
            self._rollback_timer = asyncio.get_event_loop().call_later(
                rollback_timeout,
                lambda: asyncio.ensure_future(self.rollback()),
            )

        if ui_restart_delay is not None:
            await self.middleware.call('system.general.ui_restart', ui_restart_delay)

        return await self.config()
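
    # Illustrative check-in workflow for the rollback mechanism described above
    # (hypothetical client calls; values are examples only):
    #     await middleware.call('system.general.update', {
    #         'ui_port': 8080,
    #         'rollback_timeout': 60,   # revert automatically after 60 seconds
    #         'ui_restart_delay': 3,    # apply UI settings 3 seconds after the response
    #     })
    #     # ... verify the UI is still reachable with the new settings, then:
    #     await middleware.call('system.general.checkin')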

    @private
    def set_crash_reporting(self):
        CrashReporting.enabled_in_settings = self.middleware.call_sync('system.general.config')['crash_reporting']

    @accepts()
    @returns(Int('remaining_seconds', null=True))
    async def checkin_waiting(self):
        """
        Determines whether we are waiting for the user to check in the applied UI settings changes before they are
        rolled back. Returns the number of seconds remaining before the automatic rollback, or null if no changes are
        pending.
        """
        if self._rollback_timer:
            remaining = self._rollback_timer.when() - asyncio.get_event_loop().time()
            if remaining > 0:
                return int(remaining)

    @accepts()
    @returns()
    async def checkin(self):
        """
        After UI settings are saved with `rollback_timeout`, this method needs to be called within that timeout limit
        to prevent the changes from being reverted.

        This ensures the user verifies that the changes went as planned and everything is working.
        """
        if self._rollback_timer:
            self._rollback_timer.cancel()

        self._rollback_timer = None
        self._original_datastore = {}

    @private
    async def rollback(self):
        if self._original_datastore:
            await self.middleware.call(
                'datastore.update',
                self._config.datastore,
                self._original_datastore['id'],
                {k: v for k, v in self._original_datastore.items() if k.startswith('stg_gui')},
            )
            await self.middleware.call('system.general.ui_restart', 0)

            self._rollback_timer = None
            self._original_datastore = {}
Example #12
class DockerImagesService(CRUDService):

    class Config:
        datastore_primary_key_type = 'string'
        namespace = 'container.image'
        namespace_alias = 'docker.images'
        cli_namespace = 'app.docker.image'

    ENTRY = Dict(
        'container_image_entry',
        Str('id'),
        Dict('labels', additional_attrs=True),
        List('repo_tags', items=[Str('repo_tag')]),
        List('repo_digests', items=[Str('repo_digest')]),
        Int('size'),
        Bool('dangling'),
        Bool('update_available'),
        Bool('system_image'),
        Datetime('created'),
        List('parsed_repo_tags', items=[Dict(
            'parsed_repo_tag',
            Str('image'),
            Str('tag'),
            Str('registry'),
            Str('complete_tag'),
        )])
    )

    @filterable
    async def query(self, filters, options):
        """
        Retrieve container images present in the system.

        `query-options.extra.parse_tags` is a boolean which, when set, retrieves normalized tags for
        container images.
        """
        results = []
        if not await self.middleware.call('service.started', 'docker'):
            return results

        extra = deepcopy(options.get('extra', {}))
        update_cache = await self.middleware.call('container.image.image_update_cache')
        system_images = await self.middleware.call('container.image.get_system_images_tags')
        parse_tags = extra.get('parse_tags', False)

        async with aiodocker.Docker() as docker:
            for image in await docker.images.list():
                repo_tags = image['RepoTags'] or []
                system_image = any(tag in system_images for tag in repo_tags)
                results.append({
                    'id': image['Id'],
                    'labels': image['Labels'] or {},
                    'repo_tags': repo_tags,
                    'repo_digests': image.get('RepoDigests') or [],
                    'size': image['Size'],
                    'created': datetime.fromtimestamp(int(image['Created'])),
                    'dangling': len(repo_tags) == 1 and repo_tags[0] == '<none>:<none>',
                    'update_available': not system_image and any(update_cache[r] for r in repo_tags),
                    'system_image': system_image,
                    **(
                        {'parsed_repo_tags': await self.middleware.call('container.image.parse_tags', repo_tags)}
                        if parse_tags else {}
                    )
                })
        return filter_list(results, filters, options)
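
    # Illustrative usage of the parse_tags extra option (hypothetical client call):
    #     await middleware.call('container.image.query', [], {'extra': {'parse_tags': True}})
    # adds a 'parsed_repo_tags' entry with image/tag/registry broken out for every repo tag.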

    @accepts(
        Dict(
            'image_pull',
            Dict(
                'docker_authentication',
                Str('username', required=True),
                Str('password', required=True),
                default=None,
                null=True,
            ),
            Str('from_image', required=True),
            Str('tag', default=None, null=True),
        )
    )
    @returns(List(items=[Dict('pull_result_entry', Str('status'), additional_attrs=True)]))
    @job()
    async def pull(self, job, data):
        """
        `from_image` is the name of the image to pull. The format for the name is "registry/repo/image", where
        registry may be omitted, in which case it defaults to the Docker registry.

        `tag` specifies the tag of the image and defaults to `null`. When `null`, all tags of the image are
        retrieved.

        `docker_authentication` should be specified if the image to be retrieved is in a private repository.
        """
        await self.docker_checks()
        # TODO: Have job progress report downloading progress
        async with aiodocker.Docker() as docker:
            try:
                response = await docker.images.pull(
                    from_image=data['from_image'], tag=data['tag'], auth=data['docker_authentication']
                )
            except aiodocker.DockerError as e:
                raise CallError(f'Failed to pull image: {e.message}')

        await self.middleware.call('container.image.clear_update_flag_for_tag', f'{data["from_image"]}:{data["tag"]}')

        return response
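
    # Illustrative pull job (hypothetical image name and credentials):
    #     await middleware.call('container.image.pull', {
    #         'from_image': 'library/nginx',
    #         'tag': 'latest',
    #         'docker_authentication': {'username': 'user', 'password': 'secret'},
    #     })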

    @accepts(
        Str('id'),
        Dict(
            'options',
            Bool('force', default=False),
        )
    )
    @returns()
    async def do_delete(self, id, options):
        """
        `options.force` should be used to force delete an image even if it's in use by a stopped container.
        """
        await self.docker_checks()
        image = await self.get_instance(id)
        if image['system_image']:
            raise CallError(f'{id} is being used by system and cannot be deleted.')

        async with aiodocker.Docker() as docker:
            await docker.images.delete(name=id, force=options['force'])

        await self.middleware.call('container.image.remove_image_from_cache', image)

    @private
    async def load_images_from_file(self, path):
        await self.docker_checks()
        if not os.path.exists(path):
            raise CallError(f'"{path}" path does not exist.', errno=errno.ENOENT)

        resp = []
        async with aiodocker.Docker() as client:
            with open(path, 'rb') as f:
                async for i in client.images.import_image(data=f, stream=True):
                    if 'error' in i:
                        raise CallError(f'Unable to load images from file: {i["error"]}')
                    else:
                        resp.append(i)
        return resp

    @private
    async def load_default_images(self):
        await self.load_images_from_file(DEFAULT_DOCKER_IMAGES_PATH)

    @private
    async def docker_checks(self):
        if not await self.middleware.call('service.started', 'docker'):
            raise CallError('Docker service is not running')

    @private
    def normalise_tag(self, tag):
        tags = [tag]
        i = tag.find('/')
        if i == -1 or (not any(c in tag[:i] for c in ('.', ':')) and tag[:i] != 'localhost'):
            for registry in (DEFAULT_DOCKER_REGISTRY, 'docker.io'):
                tags.append(f'{registry}/{tag}')
                if '/' not in tag:
                    tags.append(f'{registry}/{DEFAULT_DOCKER_REPO}/{tag}')
        else:
            if tag.startswith('docker.io/'):
                tags.append(f'{DEFAULT_DOCKER_REGISTRY}/{tag[len("docker.io/"):]}')
            elif tag.startswith(DEFAULT_DOCKER_REGISTRY):
                tags.append(f'docker.io/{tag[len(DEFAULT_DOCKER_REGISTRY):]}')
        return tags
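
    # Worked example, assuming DEFAULT_DOCKER_REGISTRY == 'registry-1.docker.io' and
    # DEFAULT_DOCKER_REPO == 'library' (the exact values come from constants defined elsewhere):
    #     normalise_tag('nginx:latest') ->
    #         ['nginx:latest',
    #          'registry-1.docker.io/nginx:latest',
    #          'registry-1.docker.io/library/nginx:latest',
    #          'docker.io/nginx:latest',
    #          'docker.io/library/nginx:latest']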

    @private
    def get_system_images_tags(self):
        with open(DEFAULT_DOCKER_IMAGES_LIST_PATH, 'r') as f:
            images = [i for i in map(str.strip, f.readlines()) if i]

        images.extend([
            'nvidia/k8s-device-plugin:1.0.0-beta6',
            'k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0',
            'k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0',
            'k8s.gcr.io/sig-storage/csi-resizer:v1.1.0',
            'k8s.gcr.io/sig-storage/snapshot-controller:v4.0.0',
            'k8s.gcr.io/sig-storage/csi-snapshotter:v4.0.0',
            'openebs/zfs-driver:ci',
        ])
        return list(itertools.chain(
            *[self.normalise_tag(tag) for tag in images]
        ))