Code example #1
def test__schema_float_null():
    @accepts(Float('data', null=True))
    def floatnull(self, data):
        return data

    self = Mock()

    assert floatnull(self, None) is None
Code example #2
def test__schema_float_not_null():
    @accepts(Float('data', null=False))
    def floatnotnull(self, data):
        return data

    self = Mock()

    with pytest.raises(Error):
        # the Error is raised by the call itself, so the assert is never evaluated
        assert floatnotnull(self, None) is not None
Code example #3
def test__schema_float_values(value, expected):
    @accepts(Float('data', null=False))
    def floatv(self, data):
        return data

    self = Mock()

    if expected is Error:
        with pytest.raises(Error) as ei:
            floatv(self, value)
        assert ei.value.errmsg == 'Not a floating point number'
    else:
        assert floatv(self, value) == expected
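
The third test reads as a parametrized test, but its `@pytest.mark.parametrize` decorator and value table were cut off in the excerpt. A purely hypothetical reconstruction of the missing decorator (the real value table is not shown in the source) might look like:

import pytest

# Hypothetical value table; the actual cases are not part of the excerpt.
@pytest.mark.parametrize('value,expected', [
    (1.5, 1.5),        # a float is returned unchanged
    (2, 2.0),          # an int is presumably coerced to float
    ('foo', Error),    # non-numeric input is expected to raise a schema Error
])
def test__schema_float_values(value, expected):
    ...  # body as in code example #3 above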
Code example #4
class SystemService(Service):
    CPU_INFO = {
        'cpu_model': None,
        'core_count': None,
        'physical_core_count': None,
    }

    MEM_INFO = {
        'physmem_size': None,
    }

    BIRTHDAY_DATE = {
        'date': None,
    }

    HOST_ID = None

    class Config:
        cli_namespace = 'system'

    @private
    async def birthday(self):

        if self.BIRTHDAY_DATE['date'] is None:
            birth = (await self.middleware.call('datastore.config', 'system.settings'))['stg_birthday']
            if birth != datetime(1970, 1, 1):
                self.BIRTHDAY_DATE['date'] = birth

        return self.BIRTHDAY_DATE

    @private
    async def mem_info(self):

        if self.MEM_INFO['physmem_size'] is None:
            # physmem doesn't change after boot so cache the results
            self.MEM_INFO['physmem_size'] = psutil.virtual_memory().total

        return self.MEM_INFO

    @private
    def get_cpu_model(self):
        with open('/proc/cpuinfo', 'r') as f:
            model = RE_CPU_MODEL.search(f.read())
            return model.group(1) if model else None

    @private
    async def cpu_info(self):
        """
        CPU info doesn't change after boot so cache the results
        """

        if self.CPU_INFO['cpu_model'] is None:
            self.CPU_INFO['cpu_model'] = await self.middleware.call('system.get_cpu_model')

        if self.CPU_INFO['core_count'] is None:
            self.CPU_INFO['core_count'] = psutil.cpu_count(logical=True)

        if self.CPU_INFO['physical_core_count'] is None:
            self.CPU_INFO['physical_core_count'] = psutil.cpu_count(logical=False)

        return self.CPU_INFO

    @private
    async def time_info(self):
        uptime_seconds = time.clock_gettime(time.CLOCK_MONOTONIC_RAW)
        current_time = time.time()

        return {
            'uptime_seconds': uptime_seconds,
            'uptime': str(timedelta(seconds=uptime_seconds)),
            'boot_time': datetime.fromtimestamp((current_time - uptime_seconds), timezone.utc),
            'datetime': datetime.fromtimestamp(current_time, timezone.utc),
        }

    @private
    async def hostname(self):
        return socket.gethostname()

    @accepts()
    @returns(Str('system_host_identifier'))
    def host_id(self):
        """
        Retrieve a hex string that is generated based
        on the contents of the `/etc/hostid` file. This
        is a permanent value that persists across
        reboots/upgrades and can be used as a unique
        identifier for the machine.
        """
        if self.HOST_ID is None:
            with open('/etc/hostid', 'rb') as f:
                hostid = f.read().strip()
                if hostid:
                    self.HOST_ID = hashlib.sha256(hostid).hexdigest()

        return self.HOST_ID

    @no_auth_required
    @throttle(seconds=2, condition=throttle_condition)
    @accepts()
    @returns(Datetime('system_build_time'))
    @pass_app()
    async def build_time(self, app):
        """
        Retrieve build time of the system.
        """
        buildtime = sw_buildtime()
        return datetime.fromtimestamp(int(buildtime)) if buildtime else buildtime

    @accepts()
    @returns(Dict(
        'system_info',
        Str('version', required=True, title='TrueNAS Version'),
        Datetime('buildtime', required=True, title='TrueNAS build time'),
        Str('hostname', required=True, title='System host name'),
        Int('physmem', required=True, title='System physical memory'),
        Str('model', required=True, title='CPU Model'),
        Int('cores', required=True, title='CPU Cores'),
        Int('physical_cores', required=True, title='CPU Physical Cores'),
        List('loadavg', required=True),
        Str('uptime', required=True),
        Float('uptime_seconds', required=True),
        Str('system_serial', required=True, null=True),
        Str('system_product', required=True, null=True),
        Str('system_product_version', required=True, null=True),
        Dict('license', additional_attrs=True, null=True),  # TODO: Fill this in please
        Datetime('boottime', required=True),
        Datetime('datetime', required=True),
        Datetime('birthday', required=True, null=True),
        Str('timezone', required=True),
        Str('system_manufacturer', required=True, null=True),
        Bool('ecc_memory', required=True),
    ))
    async def info(self):
        """
        Returns basic system information.
        """
        time_info = await self.middleware.call('system.time_info')
        dmidecode = await self.middleware.call('system.dmidecode_info')
        cpu_info = await self.middleware.call('system.cpu_info')
        mem_info = await self.middleware.call('system.mem_info')
        birthday = await self.middleware.call('system.birthday')
        timezone_setting = (await self.middleware.call('datastore.config', 'system.settings'))['stg_timezone']

        return {
            'version': await self.middleware.call('system.version'),
            'buildtime': await self.middleware.call('system.build_time'),
            'hostname': await self.middleware.call('system.hostname'),
            'physmem': mem_info['physmem_size'],
            'model': cpu_info['cpu_model'],
            'cores': cpu_info['core_count'],
            'physical_cores': cpu_info['physical_core_count'],
            'loadavg': list(os.getloadavg()),
            'uptime': time_info['uptime'],
            'uptime_seconds': time_info['uptime_seconds'],
            'system_serial': dmidecode['system-serial-number'] if dmidecode['system-serial-number'] else None,
            'system_product': dmidecode['system-product-name'] if dmidecode['system-product-name'] else None,
            'system_product_version': dmidecode['system-version'] if dmidecode['system-version'] else None,
            'license': await self.middleware.call('system.license'),
            'boottime': time_info['boot_time'],
            'datetime': time_info['datetime'],
            'birthday': birthday['date'],
            'timezone': timezone_setting,
            'system_manufacturer': dmidecode['system-manufacturer'] if dmidecode['system-manufacturer'] else None,
            'ecc_memory': dmidecode['ecc-memory'],
        }

    @private
    def get_synced_clock_time(self):
        """
        Return the synced clock time if ntpd has synced with NTP servers;
        otherwise return None.
        """
        client = ntplib.NTPClient()
        try:
            response = client.request('localhost')
        except Exception:
            # Cannot connect to NTP server
            self.logger.error('Error while connecting to NTP server', exc_info=True)
        else:
            if response.version and response.leap != 3:
                # https://github.com/darkhelmet/ntpstat/blob/11f1d49cf4041169e1f741f331f65645b67680d8/ntpstat.c#L172
                # if leap second indicator is 3, it means that the clock has not been synchronized
                return datetime.fromtimestamp(response.tx_time, timezone.utc)
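
The service methods above are normally reached through the middleware RPC layer rather than called directly. A minimal sketch, assuming a running middlewared instance and the `middlewared.client` package:

from middlewared.client import Client

with Client() as c:
    info = c.call('system.info')      # aggregates cpu_info, mem_info, time_info, ...
    print(info['hostname'], info['uptime'])
    print(c.call('system.host_id'))   # sha256 hex digest derived from /etc/hostid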
Code example #5
        }

    @filterable
    @filterable_returns(
        Dict(
            'disk_smart_test_result',
            Str('disk', required=True),
            List('tests',
                 items=[
                     Dict(
                         'test_result',
                         Int('num', required=True),
                         Str('description', required=True),
                         Str('status', required=True),
                         Str('status_verbose', required=True),
                         Float('remaining', required=True),
                         Int('lifetime', required=True),
                         Str('lba_of_first_error', null=True, required=True),
                     )
                 ]),
            Dict(
                'current_test',
                Int('progress', required=True),
                null=True,
            ),
        ))
    async def results(self, filters, options):
        """
        Get S.M.A.R.T. test results for disk(s).

        .. examples(websocket)::
Code example #6
class BootService(Service):

    class Config:
        cli_namespace = 'system.boot'

    @private
    async def pool_name(self):
        return BOOT_POOL_NAME

    @accepts()
    @returns(
        Dict(
            'boot_pool_state',
            Str('name'),
            Str('id'),
            Str('guid'),
            Str('hostname'),
            Str('status'),
            Bool('healthy'),
            Int('error_count'),
            Dict(
                'root_dataset',
                Str('id'),
                Str('name'),
                Str('pool'),
                Str('type'),
                Dict(
                    'properties',
                    additional_attrs=True,
                ),
                Str('mountpoint', null=True),
                Bool('encrypted'),
                Str('encryption_root', null=True),
                Bool('key_loaded'),
            ),
            Dict(
                'properties',
                additional_attrs=True,
            ),
            List('features', items=[Dict(
                'feature_item',
                Str('name'),
                Str('guid'),
                Str('description'),
                Str('state'),
            )]),
            Dict(
                'scan',
                Str('function'),
                Str('state'),
                Datetime('start_time', null=True),
                Datetime('end_time', null=True),
                Float('percentage'),
                Int('bytes_to_process'),
                Int('bytes_processed'),
                Datetime('pause', null=True),
                Int('errors'),
                Int('bytes_issued', null=True),
                Int('total_secs_left', null=True),

            ),
            Dict(
                'root_vdev',
                Str('type'),
                Str('path', null=True),
                Str('guid'),
                Str('status'),
                Dict(
                    'stats',
                    Int('timestamp'),
                    Int('read_errors'),
                    Int('write_errors'),
                    Int('checksum_errors'),
                    List('ops', items=[Int('op')]),
                    List('bytes', items=[Int('byte')]),
                    Int('size'),
                    Int('allocated'),
                    Int('fragmentation'),
                    Int('self_healed'),
                    Int('configured_ashift'),
                    Int('logical_ashift'),
                    Int('physical_ashift'),
                ),
            ),
            Dict(
                'groups',
                additional_attrs=True,
            ),
            Str('status_code'),
            Str('status_detail'),
        ),
    )
    async def get_state(self):
        """
        Returns the current state of the boot pool, including all vdevs, properties and datasets.
        """
        return await self.middleware.call('zfs.pool.query', [('name', '=', BOOT_POOL_NAME)], {'get': True})

    @accepts()
    @returns(List('disks', items=[Str('disk')]))
    async def get_disks(self):
        """
        Returns disks of the boot pool.
        """
        return await self.middleware.call('zfs.pool.get_disks', BOOT_POOL_NAME)

    @private
    async def get_boot_type(self):
        """
        Get the boot type of the boot pool.

        Returns:
            "BIOS", "EFI", None
        """
        # https://wiki.debian.org/UEFI
        return 'EFI' if os.path.exists('/sys/firmware/efi') else 'BIOS'

    @accepts(
        Str('dev'),
        Dict(
            'options',
            Bool('expand', default=False),
        ),
    )
    @returns()
    @job(lock='boot_attach')
    async def attach(self, job, dev, options):
        """
        Attach a disk to the boot pool, turning a stripe into a mirror.

        The `expand` option determines whether the new disk partition will be
        the maximum size available or the same size as the current disk's partition.
        """

        disks = list(await self.get_disks())
        if len(disks) > 1:
            raise CallError('3-way mirror not supported')

        format_opts = {}
        if not options['expand']:
            # Let's try to find out the size of the current freebsd-zfs partition so
            # the new partition is not bigger, preventing a size mismatch if one of
            # them fails later on. See #21336
            zfs_part = await self.middleware.call('disk.get_partition', disks[0], 'ZFS')
            if zfs_part:
                format_opts['size'] = zfs_part['size']

        swap_part = await self.middleware.call('disk.get_partition', disks[0], 'SWAP')
        if swap_part:
            format_opts['swap_size'] = swap_part['size']
        await self.middleware.call('boot.format', dev, format_opts)

        pool = await self.middleware.call('zfs.pool.query', [['name', '=', BOOT_POOL_NAME]], {'get': True})

        zfs_dev_part = await self.middleware.call('disk.get_partition', dev, 'ZFS')
        extend_pool_job = await self.middleware.call(
            'zfs.pool.extend', BOOT_POOL_NAME, None, [{
                'target': pool['groups']['data'][0]['guid'],
                'type': 'DISK',
                'path': f'/dev/{zfs_dev_part["name"]}'
            }]
        )

        await self.middleware.call('boot.install_loader', dev)

        await job.wrap(extend_pool_job)

        # If the user is upgrading their disks, set expand to True to make sure that we
        # register the new disks' capacity, which increases the size of the pool
        await self.middleware.call('zfs.pool.online', BOOT_POOL_NAME, zfs_dev_part['name'], True)

        await self.update_initramfs()

    @accepts(Str('dev'))
    @returns()
    async def detach(self, dev):
        """
        Detach given `dev` from boot pool.
        """
        await self.middleware.call('zfs.pool.detach', BOOT_POOL_NAME, dev, {'clear_label': True})
        await self.update_initramfs()

    @accepts(Str('label'), Str('dev'))
    @returns()
    async def replace(self, label, dev):
        """
        Replace device `label` on boot pool with `dev`.
        """
        format_opts = {}
        disks = list(await self.get_disks())
        swap_part = await self.middleware.call('disk.get_partition', disks[0], 'SWAP')
        if swap_part:
            format_opts['swap_size'] = swap_part['size']

        await self.middleware.call('boot.format', dev, format_opts)
        zfs_dev_part = await self.middleware.call('disk.get_partition', dev, 'ZFS')
        await self.middleware.call('zfs.pool.replace', BOOT_POOL_NAME, label, zfs_dev_part['name'])
        await self.middleware.call('boot.install_loader', dev)
        await self.update_initramfs()

    @accepts()
    @returns()
    @job(lock='boot_scrub')
    async def scrub(self, job):
        """
        Scrub on boot pool.
        """
        subjob = await self.middleware.call('pool.scrub.scrub', BOOT_POOL_NAME)
        return await job.wrap(subjob)

    @accepts(
        Int('interval', validators=[Range(min=1)])
    )
    @returns(Int('interval'))
    async def set_scrub_interval(self, interval):
        """
        Set Automatic Scrub Interval value in days.
        """
        await self.middleware.call(
            'datastore.update',
            'system.advanced',
            (await self.middleware.call('system.advanced.config'))['id'],
            {'adv_boot_scrub': interval},
        )
        return interval

    @accepts()
    @returns(Int('interval'))
    async def get_scrub_interval(self):
        """
        Get Automatic Scrub Interval value in days.
        """
        return (await self.middleware.call('system.advanced.config'))['boot_scrub']

    @private
    async def update_initramfs(self):
        """
        Returns true if initramfs was updated and false otherwise.
        """
        cp = await run(
            '/usr/local/bin/truenas-initrd.py', '/', encoding='utf8', errors='ignore', check=False
        )
        if cp.returncode > 1:
            raise CallError(f'Failed to update initramfs: {cp.stderr}')

        return cp.returncode == 1

    @private
    async def expand(self):
        boot_pool = await self.middleware.call('boot.pool_name')
        for device in await self.middleware.call('zfs.pool.get_devices', boot_pool):
            try:
                await self.expand_device(device)
            except CallError as e:
                self.middleware.logger.error('Error trying to expand boot pool partition %r: %r', device, e)
            except Exception:
                self.middleware.logger.error('Error trying to expand boot pool partition %r', device, exc_info=True)

    @private
    async def expand_device(self, device):
        disk = await self.middleware.call('disk.get_disk_from_partition', device)

        partitions = await self.middleware.call('disk.list_partitions', disk)
        if len(partitions) != 3:
            raise CallError(f'Expected 3 partitions, found {len(partitions)}')

        if partitions[-1]['name'] != device:
            raise CallError(f'{device} is not the last partition')

        if partitions[-1]['partition_number'] != 3:
            raise CallError(f'{device} is not the 3rd partition')

        if partitions[-1]['start_sector'] != partitions[-2]['end_sector'] + 1:
            raise CallError(f'{device} does not immediately follow the 2nd partition')

        disk_size = await self.middleware.call('disk.get_dev_size', disk)
        if partitions[-1]['end'] > disk_size / 1.1:
            return

        self.middleware.logger.info('Resizing boot pool partition %r from %r (disk_size = %r)',
                                    device, partitions[-1]['end'], disk_size)
        await run('sgdisk', '-d', '3', f'/dev/{disk}', encoding='utf-8', errors='ignore')
        await run('sgdisk', '-N', '3', f'/dev/{disk}', encoding='utf-8', errors='ignore')
        await run('partprobe', encoding='utf-8', errors='ignore')
        await run('zpool', 'online', '-e', 'boot-pool', device, encoding='utf-8', errors='ignore')
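
Both `attach` and `replace` end by calling `update_initramfs`, which maps the script's exit code onto a tri-state result: 0 means unchanged, 1 means updated, and anything higher is a failure. A standalone sketch of the same convention using only the standard library (same script path as above, but run outside the middleware):

import subprocess

cp = subprocess.run(
    ['/usr/local/bin/truenas-initrd.py', '/'],
    capture_output=True, text=True, check=False,
)
if cp.returncode > 1:          # > 1 signals a real failure
    raise RuntimeError(f'Failed to update initramfs: {cp.stderr}')
updated = cp.returncode == 1   # 1 means the initramfs was rewritten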
Code example #7
class FilesystemService(Service):
    class Config:
        cli_namespace = 'storage.filesystem'

    @private
    def resolve_cluster_path(self, path, ignore_ctdb=False):
        """
        Convert a "CLUSTER:"-prefixed path to an absolute path
        on the server.
        """
        if not path.startswith(FuseConfig.FUSE_PATH_SUBST.value):
            return path

        # strip the 8-character "CLUSTER:" prefix and take the volume name
        gluster_volume = path[8:].split("/")[0]
        if gluster_volume == CTDBConfig.CTDB_VOL_NAME.value and not ignore_ctdb:
            raise CallError('access to ctdb volume is not permitted.',
                            errno.EPERM)
        elif not gluster_volume:
            raise CallError(
                f'More than the prefix "{FuseConfig.FUSE_PATH_SUBST.value}" must be provided'
            )

        is_mounted = self.middleware.call_sync('gluster.fuse.is_mounted',
                                               {'name': gluster_volume})
        if not is_mounted:
            raise CallError(
                f'{gluster_volume}: cluster volume is not mounted.',
                errno.ENXIO)

        cluster_path = path.replace(FuseConfig.FUSE_PATH_SUBST.value,
                                    f'{FuseConfig.FUSE_PATH_BASE.value}/')
        return cluster_path

    @accepts(Str('path'))
    @returns(Ref('path_entry'))
    def mkdir(self, path):
        """
        Create a directory at the specified path.
        """
        path = self.resolve_cluster_path(path)
        is_clustered = path.startswith("/cluster")

        p = pathlib.Path(path)
        if not p.is_absolute():
            raise CallError(f'{path}: not an absolute path.', errno.EINVAL)

        if p.exists():
            raise CallError(f'{path}: path already exists.', errno.EEXIST)

        realpath = os.path.realpath(path)
        if not is_clustered and not realpath.startswith('/mnt/'):
            raise CallError(f'{path}: path not permitted', errno.EPERM)

        os.mkdir(path)
        stat = p.stat()
        data = {
            'name': p.parts[-1],
            'path': path,
            'realpath': realpath,
            'type': 'DIRECTORY',
            'size': stat.st_size,
            'mode': stat.st_mode,
            'acl': False if self.acl_is_trivial(path) else True,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
        }

        return data

    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    @filterable_returns(
        Dict('path_entry',
             Str('name', required=True),
             Path('path', required=True),
             Path('realpath', required=True),
             Str('type',
                 required=True,
                 enum=['DIRECTORY', 'FILE', 'SYMLINK', 'OTHER']),
             Int('size', required=True, null=True),
             Int('mode', required=True, null=True),
             Bool('acl', required=True, null=True),
             Int('uid', required=True, null=True),
             Int('gid', required=True, null=True),
             register=True))
    def listdir(self, path, filters, options):
        """
        Get the contents of a directory.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
        """
        path = self.resolve_cluster_path(path)
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_symlink():
                etype = 'SYMLINK'
            elif entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path.replace(f'{FuseConfig.FUSE_PATH_BASE.value}/',
                                           FuseConfig.FUSE_PATH_SUBST.value),
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': False if self.acl_is_trivial(data["path"]) else True,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    @returns(
        Dict(
            'path_stats',
            Int('size', required=True),
            Int('mode', required=True),
            Int('uid', required=True),
            Int('gid', required=True),
            Float('atime', required=True),
            Float('mtime', required=True),
            Float('ctime', required=True),
            Int('dev', required=True),
            Int('inode', required=True),
            Int('nlink', required=True),
            Str('user', null=True, required=True),
            Str('group', null=True, required=True),
            Bool('acl', required=True),
        ))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.
        """
        path = self.resolve_cluster_path(path)
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = False if self.acl_is_trivial(path) else True

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=2048000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options):
        """
        Simplified file receiving method for small files.

        `content` must be the base64-encoded file content.
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        if path == PWENC_FILE_SECRET:
            self.middleware.call_sync('pwenc.reset_secret_cache')
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options):
        """
        Get the contents of a file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(
                options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @returns()
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @returns(Bool('successful_put'))
    @job(pipes=["input"])
    async def put(self, job, path, options):
        """
        Job to put contents to `path`.
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    @returns(
        Dict(
            'path_statfs',
            List('flags', required=True),
            List('fsid', required=True),
            Str('fstype', required=True),
            Str('source', required=True),
            Str('dest', required=True),
            Int('blocksize', required=True),
            Int('total_blocks', required=True),
            Int('free_blocks', required=True),
            Int('avail_blocks', required=True),
            Int('files', required=True),
            Int('free_files', required=True),
            Int('name_max', required=True),
            Int('total_bytes', required=True),
            Int('free_bytes', required=True),
            Int('avail_bytes', required=True),
        ))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        Raises:
            CallError(ENOENT) - Path not found
        """
        # check to see if this is a clustered path and if it is
        # resolve it to an absolute path
        # NOTE: this converts path prefixed with 'CLUSTER:' to '/cluster/...'
        path = self.resolve_cluster_path(path, ignore_ctdb=True)

        allowed_prefixes = ('/mnt/', FuseConfig.FUSE_PATH_BASE.value)
        if not path.startswith(allowed_prefixes):
            # if path doesn't start with '/mnt/' bail early
            raise CallError(
                f'Path must start with {" or ".join(allowed_prefixes)}')
        elif path == '/mnt/':
            # means the path given to us was a literal '/mnt/' which is incorrect.
            # NOTE: if the user provided 'CLUSTER:' as the literal path then
            # self.resolve_cluster_path() will raise a similar error
            raise CallError('Path must include more than "/mnt/"')

        try:
            st = os.statvfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)

        # get the closest mountpoint to the path provided
        mountpoint = pathlib.Path(path)
        while not mountpoint.is_mount():
            mountpoint = mountpoint.parent.absolute()

        # strip the `/mnt/` or `/cluster/` prefix from the mountpoint
        device = mountpoint.as_posix().removeprefix('/mnt/')
        device = device.removeprefix('/cluster/')

        # we only look for /mnt/ or /cluster/ paths and, currently,
        # those 2 paths are limited to zfs and/or fuse.glusterfs
        fstype = 'zfs' if path.startswith('/mnt/') else 'fuse.glusterfs'

        return {
            'flags': [],
            'fstype': fstype,
            'source': device,
            'dest': mountpoint.as_posix(),
            'blocksize': st.f_frsize,
            'total_blocks': st.f_blocks,
            'free_blocks': st.f_bfree,
            'avail_blocks': st.f_bavail,
            'files': st.f_files,
            'free_files': st.f_ffree,
            'name_max': st.f_namemax,
            'fsid': [],
            'total_bytes': st.f_blocks * st.f_frsize,
            'free_bytes': st.f_bfree * st.f_frsize,
            'avail_bytes': st.f_bavail * st.f_frsize,
        }

    @accepts(Str('path'))
    @returns(Bool('paths_acl_is_trivial'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.
        """
        path = self.resolve_cluster_path(path)
        if not os.path.exists(path):
            raise CallError(f'Path not found [{path}].', errno.ENOENT)

        acl = self.middleware.call_sync('filesystem.getacl', path, True)
        return acl['trivial']
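
The CLUSTER: handling in `resolve_cluster_path` boils down to a prefix substitution guarded by a mount check. A self-contained sketch of just the substitution step, with hypothetical constants standing in for `FuseConfig`:

# Hypothetical stand-ins for FuseConfig.FUSE_PATH_SUBST / FUSE_PATH_BASE.
FUSE_PATH_SUBST = 'CLUSTER:'
FUSE_PATH_BASE = '/cluster'

def resolve_cluster_path(path):
    """Map 'CLUSTER:<volume>/<rest>' to '/cluster/<volume>/<rest>'."""
    if not path.startswith(FUSE_PATH_SUBST):
        return path
    volume = path[len(FUSE_PATH_SUBST):].split('/')[0]
    if not volume:
        raise ValueError('a volume name must follow the CLUSTER: prefix')
    return path.replace(FUSE_PATH_SUBST, f'{FUSE_PATH_BASE}/')

assert resolve_cluster_path('CLUSTER:smb01/data') == '/cluster/smb01/data'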
Code example #8
class RealtimeEventSource(EventSource):
    """
    Retrieve real time statistics for CPU, network,
    virtual memory and zfs arc.
    """
    ACCEPTS = Dict(Int('interval', default=2, validators=[Range(min=2)]))
    RETURNS = Dict(
        Dict('cpu', additional_attrs=True),
        Dict(
            'disks',
            Float('busy'),
            Float('read_bytes'),
            Float('write_bytes'),
            Float('read_ops'),
            Float('write_ops'),
        ),
        Dict('interfaces', additional_attrs=True),
        Dict(
            'memory',
            Dict(
                'classes',
                Int('apps'),
                Int('arc'),
                Int('buffers'),
                Int('cache'),
                Int('page_tables'),
                Int('slab_cache'),
                Int('swap_cache'),
                Int('unused'),
            ), Dict('extra', additional_attrs=True),
            Dict(
                'swap',
                Int('total'),
                Int('used'),
            )),
        Dict('virtual_memory', additional_attrs=True),
        Dict(
            'zfs',
            Int('arc_max_size'),
            Int('arc_size'),
            Float('cache_hit_ratio'),
        ),
    )
    INTERFACE_SPEEDS_CACHE_INTERVAL = 300

    @staticmethod
    def get_cpu_usages(cp_diff):
        cp_total = sum(cp_diff) or 1
        data = {}
        data['user'] = cp_diff[0] / cp_total * 100
        data['nice'] = cp_diff[1] / cp_total * 100
        data['system'] = cp_diff[2] / cp_total * 100
        data['idle'] = cp_diff[3] / cp_total * 100
        data['iowait'] = cp_diff[4] / cp_total * 100
        data['irq'] = cp_diff[5] / cp_total * 100
        data['softirq'] = cp_diff[6] / cp_total * 100
        data['steal'] = cp_diff[7] / cp_total * 100
        data['guest'] = cp_diff[8] / cp_total * 100
        data['guest_nice'] = cp_diff[9] / cp_total * 100
        if sum(cp_diff):
            # Usage is the sum of all but idle and iowait
            data['usage'] = (
                (cp_total - cp_diff[3] - cp_diff[4]) / cp_total) * 100
        else:
            data['usage'] = 0
        return data

    def get_memory_info(self, arc_size):
        with open("/proc/meminfo") as f:
            meminfo = {
                s[0]: humanfriendly.parse_size(s[1], binary=True)
                for s in [line.split(":", 1) for line in f.readlines()]
            }

        classes = {}
        classes["page_tables"] = meminfo["PageTables"]
        classes["swap_cache"] = meminfo["SwapCached"]
        classes["slab_cache"] = meminfo["Slab"]
        classes["cache"] = meminfo["Cached"]
        classes["buffers"] = meminfo["Buffers"]
        classes["unused"] = meminfo["MemFree"]
        classes["arc"] = arc_size
        classes["apps"] = meminfo["MemTotal"] - sum(classes.values())

        extra = {
            "inactive": meminfo["Inactive"],
            "committed": meminfo["Committed_AS"],
            "active": meminfo["Active"],
            "vmalloc_used": meminfo["VmallocUsed"],
            "mapped": meminfo["Mapped"],
        }

        swap = {
            "used": meminfo["SwapTotal"] - meminfo["SwapFree"],
            "total": meminfo["SwapTotal"],
        }

        return {
            "classes": classes,
            "extra": extra,
            "swap": swap,
        }

    def get_interface_speeds(self):
        speeds = {}

        interfaces = self.middleware.call_sync('interface.query')
        for interface in interfaces:
            if m := RE_BASE.match(interface['state']['active_media_subtype']):
                speeds[interface['name']] = int(m.group(1)) * MEGABIT
            elif m := RE_MBS.match(interface['state']['active_media_subtype']):
                speeds[interface['name']] = int(m.group(1)) * MEGABIT
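
`get_cpu_usages` expects a list of per-field deltas between two CPU-time samples, in /proc/stat column order. A minimal sketch of producing that input with psutil (assuming `psutil.cpu_times()` exposes the same ten Linux fields in the order indexed above):

import time
import psutil

FIELDS = ('user', 'nice', 'system', 'idle', 'iowait',
          'irq', 'softirq', 'steal', 'guest', 'guest_nice')

before = psutil.cpu_times()
time.sleep(2)
after = psutil.cpu_times()

# Per-field deltas in the same order the method indexes cp_diff[0..9].
cp_diff = [getattr(after, f) - getattr(before, f) for f in FIELDS]
usages = RealtimeEventSource.get_cpu_usages(cp_diff)
print(f"{usages['usage']:.1f}% busy")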
Code example #9
class FilesystemService(Service):
    class Config:
        cli_namespace = 'storage.filesystem'

    @private
    def resolve_cluster_path(self, path):
        """
        Convert a "CLUSTER:"-prefixed path to an absolute path
        on the server.
        """
        if not path.startswith(FuseConfig.FUSE_PATH_SUBST.value):
            return path

        # strip the 8-character "CLUSTER:" prefix and take the volume name
        gluster_volume = path[8:].split("/")[0]
        if gluster_volume == CTDBConfig.CTDB_VOL_NAME.value:
            raise CallError('access to ctdb volume is not permitted.',
                            errno.EPERM)

        is_mounted = self.middleware.call_sync('gluster.fuse.is_mounted',
                                               {'name': gluster_volume})
        if not is_mounted:
            raise CallError(
                f'{gluster_volume}: cluster volume is not mounted.',
                errno.ENXIO)

        cluster_path = path.replace(FuseConfig.FUSE_PATH_SUBST.value,
                                    f'{FuseConfig.FUSE_PATH_BASE.value}/')
        return cluster_path

    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    @filterable_returns(
        Dict(
            'path_entry',
            Str('name', required=True),
            Path('path', required=True),
            Path('realpath', required=True),
            Str('type',
                required=True,
                enum=['DIRECTORY', 'FILE', 'SYMLINK', 'OTHER']),
            Int('size', required=True, null=True),
            Int('mode', required=True, null=True),
            Bool('acl', required=True, null=True),
            Int('uid', required=True, null=True),
            Int('gid', required=True, null=True),
        ))
    def listdir(self, path, filters, options):
        """
        Get the contents of a directory.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
        """
        path = self.resolve_cluster_path(path)
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_symlink():
                etype = 'SYMLINK'
            elif entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path.replace(f'{FuseConfig.FUSE_PATH_BASE.value}/',
                                           FuseConfig.FUSE_PATH_SUBST.value),
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': False if self.acl_is_trivial(data["path"]) else True,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})

    @accepts(Str('path'))
    @returns(
        Dict(
            'path_stats',
            Int('size', required=True),
            Int('mode', required=True),
            Int('uid', required=True),
            Int('gid', required=True),
            Float('atime', required=True),
            Float('mtime', required=True),
            Float('ctime', required=True),
            Int('dev', required=True),
            Int('inode', required=True),
            Int('nlink', required=True),
            Str('user', null=True, required=True),
            Str('group', null=True, required=True),
            Bool('acl', required=True),
        ))
    def stat(self, path):
        """
        Return the filesystem stat(2) for a given `path`.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.
        """
        path = self.resolve_cluster_path(path)
        try:
            stat = os.stat(path, follow_symlinks=False)
        except FileNotFoundError:
            raise CallError(f'Path {path} not found', errno.ENOENT)

        stat = {
            'size': stat.st_size,
            'mode': stat.st_mode,
            'uid': stat.st_uid,
            'gid': stat.st_gid,
            'atime': stat.st_atime,
            'mtime': stat.st_mtime,
            'ctime': stat.st_ctime,
            'dev': stat.st_dev,
            'inode': stat.st_ino,
            'nlink': stat.st_nlink,
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = False if self.acl_is_trivial(path) else True

        return stat

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=2048000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options):
        """
        Simplified file receiving method for small files.

        `content` must be the base64-encoded file content.
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        if path == PWENC_FILE_SECRET:
            self.middleware.call_sync('pwenc.reset_secret_cache')
        return True

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options):
        """
        Get the contents of a file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(
                options.get('maxlen'))).decode().strip()
        return data

    @accepts(Str('path'))
    @returns()
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @returns(Bool('successful_put'))
    @job(pipes=["input"])
    async def put(self, job, path, options):
        """
        Job to put contents to `path`.
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    @returns(
        Dict(
            'path_statfs',
            List('flags', required=True),
            List('fsid', required=True),
            Str('fstype', required=True),
            Str('source', required=True),
            Str('dest', required=True),
            Int('blocksize', required=True),
            Int('total_blocks', required=True),
            Int('free_blocks', required=True),
            Int('avail_blocks', required=True),
            Int('files', required=True),
            Int('free_files', required=True),
            Int('name_max', required=True),
            Int('total_bytes', required=True),
            Int('free_bytes', required=True),
            Int('avail_bytes', required=True),
        ))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Raises:
            CallError(ENOENT) - Path not found
        """
        try:
            st = os.statvfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)

        for partition in sorted(psutil.disk_partitions(),
                                key=lambda p: len(p.mountpoint),
                                reverse=True):
            if is_child(os.path.realpath(path), partition.mountpoint):
                break
        else:
            raise CallError('Unable to find mountpoint.')

        return {
            'flags': [],
            'fstype': partition.fstype,
            'source': partition.device,
            'dest': partition.mountpoint,
            'blocksize': st.f_frsize,
            'total_blocks': st.f_blocks,
            'free_blocks': st.f_bfree,
            'avail_blocks': st.f_bavail,
            'files': st.f_files,
            'free_files': st.f_ffree,
            'name_max': st.f_namemax,
            'fsid': [],
            'total_bytes': st.f_blocks * st.f_frsize,
            'free_bytes': st.f_bfree * st.f_frsize,
            'avail_bytes': st.f_bavail * st.f_frsize,
        }

    @accepts(Str('path'))
    @returns(Bool('paths_acl_is_trivial'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.
        """
        path = self.resolve_cluster_path(path)
        if not os.path.exists(path):
            raise CallError(f'Path not found [{path}].', errno.ENOENT)

        acl = self.middleware.call_sync('filesystem.getacl', path, True)
        return acl['trivial']
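
Unlike the earlier variant, this `statfs` discovers the backing mount by taking the partition with the longest mountpoint that still contains the path. A standalone sketch of that selection, with a simple prefix check standing in for the `is_child` helper:

import os
import psutil

def find_mountpoint(path):
    # Longest mountpoint first, so the most specific mount wins;
    # '/' acts as the fallback because every path starts with it.
    real = os.path.realpath(path)
    for part in sorted(psutil.disk_partitions(all=True),
                       key=lambda p: len(p.mountpoint), reverse=True):
        prefix = part.mountpoint.rstrip('/') + '/'
        if real == part.mountpoint or real.startswith(prefix):
            return part
    raise RuntimeError('Unable to find mountpoint.')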
Code example #10
class CatalogService(CRUDService):
    class Config:
        datastore = 'services.catalog'
        datastore_extend = 'catalog.catalog_extend'
        datastore_extend_context = 'catalog.catalog_extend_context'
        datastore_primary_key = 'label'
        datastore_primary_key_type = 'string'
        cli_namespace = 'app.catalog'

    ENTRY = Dict(
        'catalog_entry',
        Str('label',
            required=True,
            validators=[Match(r'^\w+[\w.-]*$')],
            max_length=60),
        Str('repository', required=True, empty=False),
        Str('branch', required=True, empty=False),
        Str('location', required=True),
        Str('id', required=True),
        List('preferred_trains'),
        Dict('trains', additional_attrs=True),
        Bool('healthy'),
        Bool('error'),
        Bool('builtin'),
        Bool('cached'),
        Dict(
            'caching_progress',
            Str('description', null=True),
            Any('extra', null=True),
            Float('percent', null=True),
            null=True,
        ),
        Dict('caching_job', null=True, additional_attrs=True),
    )

    @private
    async def catalog_extend_context(self, rows, extra):
        k8s_dataset = (await self.middleware.call('kubernetes.config'))['dataset']
        catalogs_dir = os.path.join(
            '/mnt', k8s_dataset, 'catalogs'
        ) if k8s_dataset else f'{TMP_IX_APPS_DIR}/catalogs'
        context = {
            'catalogs_dir': catalogs_dir,
            'extra': extra or {},
            'catalogs_context': {},
        }
        if extra.get('item_details'):
            item_sync_params = await self.middleware.call(
                'catalog.sync_items_params')
            item_jobs = await self.middleware.call(
                'core.get_jobs',
                [['method', '=', 'catalog.items'], ['state', '=', 'RUNNING']])
            for row in rows:
                label = row['label']
                catalog_info = {
                    'item_job': await self.middleware.call(
                        'catalog.items', label, {
                            'cache': True,
                            'cache_only': await self.official_catalog_label() != row['label'],
                            'retrieve_all_trains': extra.get('retrieve_all_trains', True),
                            'trains': extra.get('trains', []),
                        }
                    ),
                    'cached': label == OFFICIAL_LABEL
                    or await self.middleware.call('catalog.cached', label, False)
                    or await self.middleware.call('catalog.cached', label, True),
                    'normalized_progress': None,
                }
                if not catalog_info['cached']:
                    caching_job = filter_list(
                        item_jobs,
                        [['arguments', '=', [row['label'], item_sync_params]]])
                    if not caching_job:
                        caching_job_obj = await self.middleware.call(
                            'catalog.items', label, item_sync_params)
                        caching_job = caching_job_obj.__encode__()
                    else:
                        caching_job = caching_job[0]

                    catalog_info['normalized_progress'] = {
                        'caching_job': caching_job,
                        'caching_progress': caching_job['progress'],
                    }
                context['catalogs_context'][label] = catalog_info

        return context

    @private
    async def normalize_data_from_item_job(self, label, catalog_context):
        normalized = {
            'trains': {},
            'cached': catalog_context['cached'],
            'healthy': False,
            'error': True,
            'caching_progress': None,
            'caching_job': None,
        }
        item_job = catalog_context['item_job']
        await item_job.wait()
        if not item_job.error:
            normalized.update({
                'trains': item_job.result,
                'healthy': all(
                    app['healthy'] for train in item_job.result
                    for app in item_job.result[train].values()
                ),
                'cached': label == OFFICIAL_LABEL
                or await self.middleware.call('catalog.cached', label, False)
                or await self.middleware.call('catalog.cached', label, True),
                'error': False,
                'caching_progress': None,
                'caching_job': None,
            })
        return normalized

    @private
    async def catalog_extend(self, catalog, context):
        catalog.update({
            'location': os.path.join(
                context['catalogs_dir'],
                convert_repository_to_path(catalog['repository'], catalog['branch'])
            ),
            'id': catalog['label'],
        })
        extra = context['extra']
        if extra.get('item_details'):
            catalog_context = context['catalogs_context'][catalog['label']]
            catalog.update(await self.normalize_data_from_item_job(
                catalog['id'], catalog_context))
            if catalog['cached']:
                return catalog
            else:
                catalog.update(catalog_context['normalized_progress'])
        return catalog

    @private
    async def common_validation(self, catalog, schema, data):
        found_trains = set(catalog['trains'])
        diff = set(data['preferred_trains']) - found_trains
        verrors = ValidationErrors()
        if diff:
            verrors.add(
                f'{schema}.preferred_trains',
                f'{", ".join(diff)} trains were not found in catalog.')
        if not data['preferred_trains']:
            verrors.add(
                f'{schema}.preferred_trains',
                'At least 1 preferred train must be specified for a catalog.')

        verrors.check()

    @accepts(
        Patch(
            'catalog_entry',
            'catalog_create',
            ('add', Bool('force', default=False)),
            ('rm', {'name': 'id'}),
            ('rm', {'name': 'trains'}),
            ('rm', {'name': 'healthy'}),
            ('rm', {'name': 'error'}),
            ('rm', {'name': 'builtin'}),
            ('rm', {'name': 'location'}),
            ('rm', {'name': 'cached'}),
            ('rm', {'name': 'caching_progress'}),
            ('rm', {'name': 'caching_job'}),
        ),
    )
    @job(lock=lambda args: f'catalog_create_{args[0]["label"]}')
    async def do_create(self, job, data):
        """
        `catalog_create.preferred_trains` specifies trains which will be displayed in the UI directly for a user.
        """
        verrors = ValidationErrors()
        # We normalize the label
        data['label'] = data['label'].upper()

        if await self.query([['id', '=', data['label']]]):
            verrors.add('catalog_create.label',
                        'A catalog with the specified label already exists',
                        errno=errno.EEXIST)

        if await self.query([['repository', '=', data['repository']],
                             ['branch', '=', data['branch']]]):
            for k in ('repository', 'branch'):
                verrors.add(
                    f'catalog_create.{k}',
                    'A catalog with the same repository/branch already exists',
                    errno=errno.EEXIST)

        verrors.check()

        if not data['preferred_trains']:
            data['preferred_trains'] = ['stable']

        if not data.pop('force'):
            job.set_progress(40, f'Validating {data["label"]!r} catalog')
            # We will validate the catalog now to ensure it's valid wrt contents / format
            path = os.path.join(
                TMP_IX_APPS_DIR, 'validate_catalogs',
                convert_repository_to_path(data['repository'], data['branch'])
            )
            try:
                await self.middleware.call(
                    'catalog.update_git_repository', {**data, 'location': path}, True
                )
                await self.middleware.call('catalog.validate_catalog_from_path', path)
                await self.common_validation(
                    {'trains': await self.middleware.call('catalog.retrieve_train_names', path)},
                    'catalog_create', data
                )
            except CallError as e:
                verrors.add('catalog_create.label', f'Failed to validate catalog: {e}')
            finally:
                await self.middleware.run_in_thread(shutil.rmtree, path, ignore_errors=True)
        else:
            job.set_progress(50, 'Skipping validation of catalog')

        verrors.check()

        job.set_progress(60, 'Completed Validation')

        await self.middleware.call('datastore.insert', self._config.datastore, data)
        job.set_progress(70, f'Successfully added {data["label"]!r} catalog')

        job.set_progress(80, f'Syncing {data["label"]!r} catalog')
        sync_job = await self.middleware.call('catalog.sync', data['label'])
        await sync_job.wait()
        if sync_job.error:
            raise CallError(
                f'Catalog was added successfully but failed to sync: {sync_job.error}'
            )

        job.set_progress(100, f'Successfully synced {data["label"]!r} catalog')

        return await self.get_instance(data['label'])

    @accepts(Str('id'), Dict('catalog_update', List('preferred_trains'), update=True))
    async def do_update(self, id, data):
        catalog = await self.query(
            [['id', '=', id]],
            {'extra': {'item_details': True}, 'get': True},
        )
        await self.common_validation(catalog, 'catalog_update', data)

        await self.middleware.call('datastore.update', self._config.datastore,
                                   id, data)

        return await self.get_instance(id)

    def do_delete(self, id):
        catalog = self.middleware.call_sync('catalog.get_instance', id)
        if catalog['builtin']:
            raise CallError('Builtin catalogs cannot be deleted')

        ret = self.middleware.call_sync('datastore.delete',
                                        self._config.datastore, id)

        if os.path.exists(catalog['location']):
            shutil.rmtree(catalog['location'], ignore_errors=True)

        # Let's delete any unhealthy alert if we had one
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogNotHealthy',
                                  id)
        self.middleware.call_sync('alert.oneshot_delete', 'CatalogSyncFailed',
                                  id)

        # Remove cached content of the catalog in question so that if a catalog is created again
        # with same label but different repo/branch, we don't reuse old cache
        self.middleware.call_sync('cache.pop', get_cache_key(id, True))
        self.middleware.call_sync('cache.pop', get_cache_key(id, False))

        return ret

    @private
    async def official_catalog_label(self):
        return OFFICIAL_LABEL
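

# Usage sketch, not part of the service: creating a catalog through the
# middleware API client. A hedged example - it assumes a running middleware,
# that `middlewared.client.Client` is available, and that `job=True` waits on
# the `catalog.create` job; the label/repository values are hypothetical.
if __name__ == '__main__':
    from middlewared.client import Client

    with Client() as c:
        catalog = c.call('catalog.create', {
            'label': 'personal',
            'repository': 'https://github.com/example/charts.git',
            'branch': 'main',
            'preferred_trains': ['stable'],
        }, job=True)
        print(catalog['location'])
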
Code example #11
0
class FilesystemService(Service):
    class Config:
        cli_namespace = 'storage.filesystem'

    @accepts(Str('path'))
    @returns(Bool())
    def is_immutable(self, path):
        """
        Retrieves boolean which is set when immutable flag is set on `path`.
        """
        return chflags.is_immutable_set(path)

    @accepts(Bool('set_flag'), Str('path'))
    @returns()
    def set_immutable(self, set_flag, path):
        """
        Set/Unset immutable flag at `path`.

        `set_flag` when set will set immutable flag and when unset will unset immutable flag at `path`.
        """
        chflags.set_immutable(path, set_flag)
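
    # Round-trip usage sketch (assumes a connected middleware client `c`; the
    # path is hypothetical):
    #
    #     c.call('filesystem.set_immutable', True, '/mnt/tank/protected')
    #     assert c.call('filesystem.is_immutable', '/mnt/tank/protected')
    #     c.call('filesystem.set_immutable', False, '/mnt/tank/protected')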

    @accepts(
        Dict(
            'set_dosmode',
            Path('path', required=True),
            Dict('dosmode',
                 Bool('readonly'),
                 Bool('hidden'),
                 Bool('system'),
                 Bool('archive'),
                 Bool('reparse'),
                 Bool('offline'),
                 Bool('sparse'),
                 register=True),
        ))
    @returns()
    def set_dosmode(self, data):
        return dosmode.set_dosflags(data['path'], data['dosmode'])

    @accepts(Str('path'))
    @returns(Ref('dosmode'))
    def get_dosmode(self, path):
        return dosmode.get_dosflags(path)

    @private
    def is_cluster_path(self, path):
        return path.startswith(FuseConfig.FUSE_PATH_SUBST.value)

    @private
    def resolve_cluster_path(self, path, ignore_ctdb=False):
        """
        Convert a "CLUSTER:"-prefixed path to an absolute path
        on the server.
        """
        if not path.startswith(FuseConfig.FUSE_PATH_SUBST.value):
            return path

        # strip the cluster prefix (e.g. 'CLUSTER:') and take the first path component
        gluster_volume = path[len(FuseConfig.FUSE_PATH_SUBST.value):].split("/")[0]
        if gluster_volume == CTDBConfig.CTDB_VOL_NAME.value and not ignore_ctdb:
            raise CallError('access to ctdb volume is not permitted.',
                            errno.EPERM)
        elif not gluster_volume:
            raise CallError(
                f'More than the prefix "{FuseConfig.FUSE_PATH_SUBST.value}" must be provided'
            )

        is_mounted = self.middleware.call_sync('gluster.fuse.is_mounted',
                                               {'name': gluster_volume})
        if not is_mounted:
            raise CallError(
                f'{gluster_volume}: cluster volume is not mounted.',
                errno.ENXIO)

        cluster_path = path.replace(FuseConfig.FUSE_PATH_SUBST.value,
                                    f'{FuseConfig.FUSE_PATH_BASE.value}/')
        return cluster_path
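
    # Example translations, assuming FUSE_PATH_SUBST == 'CLUSTER:' and
    # FUSE_PATH_BASE == '/cluster' (the volume name is hypothetical):
    #
    #     resolve_cluster_path('CLUSTER:smb01/data')  ->  '/cluster/smb01/data'
    #     resolve_cluster_path('/mnt/tank/data')      ->  '/mnt/tank/data'  (returned unchanged)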

    @accepts(Str('path'))
    @returns(Ref('path_entry'))
    def mkdir(self, path):
        """
        Create a directory at the specified path.
        """
        path = self.resolve_cluster_path(path)
        is_clustered = path.startswith("/cluster")

        p = pathlib.Path(path)
        if not p.is_absolute():
            raise CallError(f'{path}: not an absolute path.', errno.EINVAL)

        if p.exists():
            raise CallError(f'{path}: path already exists.', errno.EEXIST)

        realpath = os.path.realpath(path)
        if not is_clustered and not realpath.startswith('/mnt/'):
            raise CallError(f'{path}: path not permitted', errno.EPERM)

        os.mkdir(path)
        stat = p.stat()
        data = {
            'name': p.parts[-1],
            'path': path,
            'realpath': realpath,
            'type': 'DIRECTORY',
            'size': stat.st_size,
            'mode': stat.st_mode,
            'acl': not self.acl_is_trivial(path),
            'uid': stat.st_uid,
            'gid': stat.st_gid,
        }

        return data

    @private
    def stat_entry_impl(self, entry, options=None):
        out = {'st': None, 'etype': None, 'is_ctldir': False}
        opts = options or {"dir_only": False, "file_only": False}
        path = entry.absolute()

        try:
            out['st'] = entry.lstat()
        except FileNotFoundError:
            return None

        if statlib.S_ISDIR(out['st'].st_mode):
            out['etype'] = 'DIRECTORY'

        elif statlib.S_ISLNK(out['st'].st_mode):
            out['etype'] = 'SYMLINK'
            try:
                out['st'] = entry.stat()
            except FileNotFoundError:
                return None

        elif statlib.S_ISREG(out['st'].st_mode):
            out['etype'] = 'FILE'

        else:
            out['etype'] = 'OTHER'

        while str(path) != '/':
            if path.name != '.zfs':
                path = path.parent
                continue

            if path.stat().st_ino == ZFSCTL.INO_ROOT:
                out['is_ctldir'] = True
                break

            path = path.parent

        if opts['dir_only'] and out['etype'] != 'DIRECTORY':
            return None

        elif opts['file_only'] and out['etype'] != 'FILE':
            return None

        return out
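
    # The etype classification above mirrors the stdlib stat predicates - a
    # minimal standalone sketch (the path is hypothetical):
    #
    #     import pathlib
    #     import stat as statlib
    #     st = pathlib.Path('/etc/hosts').lstat()
    #     statlib.S_ISREG(st.st_mode)  # True for a regular file
    #     statlib.S_ISDIR(st.st_mode)  # True for a directory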

    @accepts(Str('path', required=True), Ref('query-filters'),
             Ref('query-options'))
    @filterable_returns(
        Dict('path_entry',
             Str('name', required=True),
             Path('path', required=True),
             Path('realpath', required=True),
             Str('type',
                 required=True,
                 enum=['DIRECTORY', 'FILE', 'SYMLINK', 'OTHER']),
             Int('size', required=True, null=True),
             Int('mode', required=True, null=True),
             Bool('acl', required=True, null=True),
             Int('uid', required=True, null=True),
             Int('gid', required=True, null=True),
             Bool('is_mountpoint', required=True),
             Bool('is_ctldir', required=True),
             register=True))
    def listdir(self, path, filters, options):
        """
        Get the contents of a directory.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
          is_mountpoint(bool): path is a mountpoint
          is_ctldir(bool): path is within special .zfs directory
        """

        path = self.resolve_cluster_path(path)
        path = pathlib.Path(path)
        if not path.exists():
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not path.is_dir():
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        if 'ix-applications' in path.parts:
            raise CallError(
                'ix-applications is a system-managed dataset and its contents cannot be listed'
            )

        stat_opts = {"file_only": False, "dir_only": False}
        for filter_ in filters:
            if filter_[0] not in ['type']:
                continue

            # only simple equality filters on DIRECTORY/FILE can be pushed down to stat
            if filter_[1] != '=' or filter_[2] not in ['DIRECTORY', 'FILE']:
                continue

            if filter_[2] == 'DIRECTORY':
                stat_opts["dir_only"] = True
            else:
                stat_opts["file_only"] = True

        rv = []
        if stat_opts["dir_only"] and stat_opts["file_only"]:
            return rv

        only_top_level = path.absolute() == pathlib.Path('/mnt')
        for entry in path.iterdir():
            st = self.stat_entry_impl(entry, stat_opts)
            if st is None:
                continue

            if only_top_level and not entry.is_mount():
                # sometimes (on failures) the top-level directory
                # where the zpool is mounted does not get removed
                # after the zpool is exported. WebUI calls this
                # specifying `/mnt` as the path. This is used when
                # configuring shares in the "Path" drop-down. To
                # prevent shares from being configured to point to
                # a path that doesn't exist on a zpool, we'll
                # filter these here.
                continue
            if 'ix-applications' in entry.parts:
                continue

            etype = st['etype']
            stat = st['st']
            realpath = (
                entry.resolve().as_posix() if etype == 'SYMLINK'
                else entry.absolute().as_posix()
            )

            data = {
                'name': entry.name,
                'path': entry.as_posix().replace(
                    f'{FuseConfig.FUSE_PATH_BASE.value}/', FuseConfig.FUSE_PATH_SUBST.value
                ),
                'realpath': realpath,
                'type': etype,
                'size': stat.st_size,
                'mode': stat.st_mode,
                'acl': not self.acl_is_trivial(realpath),
                'uid': stat.st_uid,
                'gid': stat.st_gid,
                'is_mountpoint': entry.is_mount(),
                'is_ctldir': st['is_ctldir'],
            }

            rv.append(data)

        return filter_list(rv, filters=filters or [], options=options or {})
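
    # Usage sketch (assumes a connected middleware client `c`; the path is
    # hypothetical): list only the directories under a pool mountpoint,
    # sorted by name.
    #
    #     c.call('filesystem.listdir', '/mnt/tank',
    #            [['type', '=', 'DIRECTORY']], {'order_by': ['name']})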

    @accepts(Str('path'))
    @returns(
        Dict(
            'path_stats',
            Str('realpath', required=True),
            Int('size', required=True),
            Int('mode', required=True),
            Int('uid', required=True),
            Int('gid', required=True),
            Float('atime', required=True),
            Float('mtime', required=True),
            Float('ctime', required=True),
            Int('dev', required=True),
            Int('inode', required=True),
            Int('nlink', required=True),
            Bool('is_mountpoint', required=True),
            Bool('is_ctldir', required=True),
            Str('user', null=True, required=True),
            Str('group', null=True, required=True),
            Bool('acl', required=True),
        ))
    def stat(self, _path):
        """
        Return the filesystem stat(2) for a given `path`.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.
        """
        path = pathlib.Path(self.resolve_cluster_path(_path))
        if not path.is_absolute():
            raise CallError(f'{_path}: path must be absolute', errno.EINVAL)

        st = self.stat_entry_impl(path, None)
        if st is None:
            raise CallError(f'Path {_path} not found', errno.ENOENT)

        realpath = (
            path.resolve().as_posix() if st['etype'] == 'SYMLINK'
            else path.absolute().as_posix()
        )

        stat = {
            'realpath': realpath,
            'type': st['etype'],
            'size': st['st'].st_size,
            'mode': st['st'].st_mode,
            'uid': st['st'].st_uid,
            'gid': st['st'].st_gid,
            'atime': st['st'].st_atime,
            'mtime': st['st'].st_mtime,
            'ctime': st['st'].st_ctime,
            'dev': st['st'].st_dev,
            'inode': st['st'].st_ino,
            'nlink': st['st'].st_nlink,
            'is_mountpoint': path.is_mount(),
            'is_ctldir': st['is_ctldir'],
        }

        try:
            stat['user'] = pwd.getpwuid(stat['uid']).pw_name
        except KeyError:
            stat['user'] = None

        try:
            stat['group'] = grp.getgrgid(stat['gid']).gr_name
        except KeyError:
            stat['group'] = None

        stat['acl'] = not self.acl_is_trivial(_path)

        return stat
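
    # Usage sketch (client `c` and path hypothetical): the returned `mode` can
    # be rendered with the stdlib, e.g.
    #
    #     st = c.call('filesystem.stat', '/mnt/tank/file.txt')
    #     import stat as statlib
    #     statlib.filemode(st['mode'])  # e.g. '-rw-r--r--'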

    @private
    @accepts(
        Str('path'),
        Str('content', max_length=2048000),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    def file_receive(self, path, content, options):
        """
        Simplified file receiving method for small files.

        `content` must be base64-encoded file content.
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'
        with open(path, openmode) as f:
            f.write(binascii.a2b_base64(content))
        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        if path == PWENC_FILE_SECRET:
            self.middleware.call_sync('pwenc.reset_secret_cache')
        return True
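
    # Caller-side sketch (this is a @private method, so middleware-internal;
    # path and content are hypothetical) - the payload must be base64 text:
    #
    #     import base64
    #     content = base64.b64encode(b'hello\n').decode()
    #     self.middleware.call_sync('filesystem.file_receive',
    #                               '/root/hello.txt', content, {'mode': 0o600})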

    @private
    @accepts(
        Str('path'),
        Dict(
            'options',
            Int('offset'),
            Int('maxlen'),
        ),
    )
    def file_get_contents(self, path, options):
        """
        Get the contents of file `path`, base64-encoded.

        DISCLAIMER: DO NOT USE THIS FOR BIG FILES (> 500KB).
        """
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            if options.get('offset'):
                f.seek(options['offset'])
            data = binascii.b2a_base64(f.read(options.get('maxlen'))).decode().strip()
        return data
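
    # Counterpart decode on the caller side (sketch; note the method returns
    # None when `path` does not exist):
    #
    #     import base64
    #     raw = base64.b64decode(data)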

    @accepts(Str('path'))
    @returns()
    @job(pipes=["output"])
    async def get(self, job, path):
        """
        Job to get contents of `path`.
        """

        if not os.path.isfile(path):
            raise CallError(f'{path} is not a file')

        with open(path, 'rb') as f:
            await self.middleware.run_in_thread(shutil.copyfileobj, f,
                                                job.pipes.output.w)

    @accepts(
        Str('path'),
        Dict(
            'options',
            Bool('append', default=False),
            Int('mode'),
        ),
    )
    @returns(Bool('successful_put'))
    @job(pipes=["input"])
    async def put(self, job, path, options):
        """
        Job to put contents to `path`.
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        if options.get('append'):
            openmode = 'ab'
        else:
            openmode = 'wb+'

        with open(path, openmode) as f:
            await self.middleware.run_in_thread(shutil.copyfileobj,
                                                job.pipes.input.r, f)

        mode = options.get('mode')
        if mode:
            os.chmod(path, mode)
        return True

    @accepts(Str('path'))
    @returns(
        Dict(
            'path_statfs',
            List('flags', required=True),
            List('fsid', required=True),
            Str('fstype', required=True),
            Str('source', required=True),
            Str('dest', required=True),
            Int('blocksize', required=True),
            Int('total_blocks', required=True),
            Int('free_blocks', required=True),
            Int('avail_blocks', required=True),
            Int('files', required=True),
            Int('free_files', required=True),
            Int('name_max', required=True),
            Int('total_bytes', required=True),
            Int('free_bytes', required=True),
            Int('avail_bytes', required=True),
        ))
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        Raises:
            CallError(ENOENT) - Path not found
        """
        # check to see if this is a clustered path and if it is
        # resolve it to an absolute path
        # NOTE: this converts path prefixed with 'CLUSTER:' to '/cluster/...'
        path = self.resolve_cluster_path(path, ignore_ctdb=True)

        allowed_prefixes = ('/mnt/', FuseConfig.FUSE_PATH_BASE.value)
        if not path.startswith(allowed_prefixes):
            # if path doesn't start with '/mnt/' bail early
            raise CallError(
                f'Path must start with {" or ".join(allowed_prefixes)}')
        elif path == '/mnt/':
            # means the path given to us was a literal '/mnt/' which is incorrect.
            # NOTE: if the user provided 'CLUSTER:' as the literal path then
            # self.resolve_cluster_path() will raise a similar error
            raise CallError('Path must include more than "/mnt/"')

        try:
            st = os.statvfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)

        # get the closest mountpoint to the path provided
        mountpoint = pathlib.Path(path)
        while not mountpoint.is_mount():
            mountpoint = mountpoint.parent.absolute()

        # strip the `/mnt/` or `/cluster/` prefix from the mountpoint
        device = mountpoint.as_posix().removeprefix('/mnt/')
        device = device.removeprefix('/cluster/')

        # get fstype for given path based on major:minor entry in mountinfo
        stx = stat_x.statx(path)
        maj_min = f'{stx.stx_dev_major}:{stx.stx_dev_minor}'
        fstype = None
        flags = []
        with open('/proc/self/mountinfo') as f:
            # example lines look like this. We use `find()` to keep the `.split()` calls to only 2 (instead of 3)
            # (minor optimization, but still one nonetheless)
            # "26 1 0:23 / / rw,relatime shared:1 - zfs boot-pool/ROOT/22.02-MASTER-20211129-015838 rw,xattr,posixacl"
            # OR
            # "129 26 0:50 / /mnt/data rw,noatime shared:72 - zfs data rw,xattr,posixacl"
            for line in f:
                if line.find(maj_min) != -1:
                    fstype = line.rsplit(' - ')[1].split()[0]
                    """
                    Following gets mount flags. For filesystems, there are two
                    varieties. First are flags returned by statfs(2) on Linux which
                    are defined in manpage. These are located in middle of mountinfo line.
                    The second info group is at end of mountinfo string and contains
                    superblock info returned from FS. We attempt to consilidate this
                    disparate info here.
                    """
                    unsorted_info, mount_flags = line.rsplit(' ', 1)
                    flags = mount_flags.strip().upper().split(',')

                    offset = unsorted_info.find(flags[0].lower())
                    other_flags = unsorted_info[offset:].split()[0]

                    # NOTE: avoid reusing `f` here - it is the open mountinfo file
                    for other_flag in other_flags.split(','):
                        flag = other_flag.upper()
                        if flag in flags:
                            continue

                        flags.append(flag)
                    break

        return {
            'flags': flags,
            'fstype': fstype,
            'source': device,
            'dest': mountpoint.as_posix(),
            'blocksize': st.f_frsize,
            'total_blocks': st.f_blocks,
            'free_blocks': st.f_bfree,
            'avail_blocks': st.f_bavail,
            'files': st.f_files,
            'free_files': st.f_ffree,
            'name_max': st.f_namemax,
            'fsid': [],
            'total_bytes': st.f_blocks * st.f_frsize,
            'free_bytes': st.f_bfree * st.f_frsize,
            'avail_bytes': st.f_bavail * st.f_frsize,
        }
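
    # Sanity-check sketch: the derived byte counts above are statvfs blocks
    # times the fragment size, which matches what shutil.disk_usage() reports
    # on Linux (path hypothetical):
    #
    #     import shutil
    #     shutil.disk_usage('/mnt/tank').total == st.f_blocks * st.f_frsize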

    @accepts(Str('path'))
    @returns(Bool('paths_acl_is_trivial'))
    def acl_is_trivial(self, path):
        """
        Returns True if the ACL can be fully expressed as a file mode without losing
        any access rules.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.
        """
        path = self.resolve_cluster_path(path)
        if not os.path.exists(path):
            raise CallError(f'Path not found [{path}].', errno.ENOENT)

        acl_xattrs = ACLType.xattr_names()
        xattrs_present = set(os.listxattr(path))

        return not (xattrs_present & acl_xattrs)
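

# Standalone sketch of the trivial-ACL test above, runnable outside the
# middleware. The xattr names here are illustrative assumptions; in the
# service the real set comes from ACLType.xattr_names().
import os


def acl_xattrs_absent(path, acl_xattrs=frozenset({'system.posix_acl_access', 'system.posix_acl_default'})):
    """Return True if none of the ACL-related xattrs are present on `path`."""
    return not (set(os.listxattr(path)) & acl_xattrs)


if __name__ == '__main__':
    print(acl_xattrs_absent('/tmp'))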