Пример #1
0
class ZpoolProvider(Provider):
    """Read-only RPC interface to ZFS pools on the system."""

    @description("Lists ZFS pools")
    @query('zfs-pool')
    def query(self, filter=None, params=None):
        """Query pools known to ZFS, applying optional filter/params."""
        zfs = libzfs.ZFS()
        return wrap(zfs).query(*(filter or []), **(params or {}))

    @accepts()
    @returns(h.array(h.ref('zfs-pool')))
    def find(self):
        """Return the state of pools available for import."""
        zfs = libzfs.ZFS()
        return list(map(lambda p: p.__getstate__(), zfs.find_import()))

    @accepts()
    @returns(h.ref('zfs-pool'))
    def get_boot_pool(self):
        """Return the state of the configured boot pool."""
        name = self.configstore.get('system.boot_pool_name')
        zfs = libzfs.ZFS()
        return zfs.get(name).__getstate__()

    @accepts(str)
    @returns(h.array(str))
    def get_disks(self, name):
        """Return the list of disks backing pool *name*.

        Raises RpcException(EFAULT) when libzfs reports an error.
        """
        try:
            zfs = libzfs.ZFS()
            pool = zfs.get(name)
            return pool.disks
        except libzfs.ZFSException as err:
            # 'as' form works on Python 2.6+ and 3; the comma form is
            # Python-2-only.
            raise RpcException(errno.EFAULT, str(err))
Пример #2
0
class SwapProvider(Provider):
    @accepts()
    @returns(h.array(h.ref('swap-mirror')))
    @description("Returns information about swap mirrors present in the system")
    def info(self):
        """Collect swap mirror information and return it as a list of values."""
        mirrors = get_swap_info(self.dispatcher)
        return mirrors.values()
Пример #3
0
class SupportProvider(Provider):
    @accepts(str, str)
    @returns(h.array(str))
    def categories(self, user, password):
        """Fetch the list of support ticket categories from the support server.

        Authenticates with *user*/*password* against the remote support API.
        Raises RpcException on decode failure (EINVAL), connection failure
        (ENOTCONN), timeout (ETIMEDOUT), or a server-reported error (EINVAL).
        """
        sw_name = self.dispatcher.call_sync('system.info.version').split(
            '-')[0].lower()
        try:
            r = requests.post(
                'https://%s/%s/api/v1.0/categories' % (ADDRESS, sw_name),
                data=json.dumps({
                    'user': user,
                    'password': password,
                }),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError as e:
            # Messages previously said "ticket attachment"/"ticket" -- a
            # copy/paste from the ticket code; this is the categories call.
            logger.debug('Failed to decode categories response: %s',
                         r.text)
            raise RpcException(errno.EINVAL,
                               'Failed to decode categories response')
        except requests.ConnectionError as e:
            raise RpcException(errno.ENOTCONN,
                               'Connection failed: {0}'.format(str(e)))
        except requests.Timeout as e:
            raise RpcException(errno.ETIMEDOUT,
                               'Connection timed out: {0}'.format(str(e)))

        if 'error' in data:
            raise RpcException(errno.EINVAL, data['message'])

        return data
Пример #4
0
class SystemAdvancedProvider(Provider):

    @accepts()
    @returns(h.ref('system-advanced'))
    def get_config(self):
        """Assemble the advanced system settings from the configuration store."""
        fetch = self.configstore.get
        return {
            'console_cli': fetch('system.console.cli'),
            'console_screensaver': fetch('system.console.screensaver'),
            'serial_console': fetch('system.serial.console'),
            'serial_port': fetch('system.serial.port'),
            'serial_speed': fetch('system.serial.speed'),
            'powerd': fetch('service.powerd.enable'),
            'swapondrive': fetch('system.swapondrive'),
            'autotune': fetch('system.autotune'),
            'debugkernel': fetch('system.debug.kernel'),
            'uploadcrash': fetch('system.upload_crash'),
            'motd': fetch('system.motd'),
            'boot_scrub_internal': fetch('system.boot_scrub_internal'),
            'periodic_notify_user': fetch('system.periodic.notify_user'),
        }

    @description('Returns array of serial port address')
    @accepts()
    @returns(h.array(str))
    def serial_ports(self):
        """Discover uart device addresses via devinfo(8) and return them."""
        raw = system(
            "/usr/sbin/devinfo -u | grep uart | grep 0x | cut -d- -f 1 | awk '{print $1}'",
            shell=True)[0]
        entries = raw.strip('\n').split('\n')
        # Drop empty entries; filter() keeps the original's return type.
        return filter(lambda entry: bool(entry), entries)
Пример #5
0
class SessionProvider(Provider):
    @query('session')
    def query(self, filter=None, params=None):
        """Query stored sessions with optional filter/params."""
        return self.datastore.query('sessions',
                                    *(filter or []),
                                    **(params or {}))

    @accepts()
    @returns(h.array(h.ref('sessions')))
    @description("Returns the logged in and active user sessions. "
                 "Does not include the service sessions in this.")
    def get_live_user_sessions(self):
        """Return session records for currently connected GUI/CLI users.

        Service connections (etcd, statd, ...) lack a 'uid' on their
        user object and are excluded.
        """
        # The 'uid' check separates the actual gui/cli users of the
        # websocket connection from system services like etcd, statd
        # and so on.
        live_user_session_ids = [
            conn.session_id
            for conn in self.dispatcher.ws_server.connections
            if hasattr(conn.user, 'uid')
        ]
        return self.datastore.query(
            'sessions',
            ('id', 'in', live_user_session_ids))

    @description("Returns the logged in user for the current session")
    @returns(str)
    @pass_sender
    def whoami(self, sender):
        """Return the user name attached to the sender's session."""
        return sender.user.name
Пример #6
0
class SystemInfoProvider(Provider):
    """Exposes basic host information: uname, version, load, hardware, time."""

    def __init__(self):
        # Lazily-resolved version string cache.
        self.__version = None

    @accepts()
    @returns(h.array(str))
    def uname_full(self):
        """Return the full uname tuple for this host."""
        return os.uname()

    @accepts()
    @returns(str)
    @description("Return the full version string, e.g. FreeNAS-8.1-r7794-amd64.")
    def version(self):
        """Resolve the system version string, caching it on first use."""
        if self.__version is not None:
            return self.__version

        # See #9113
        conf = Configuration.Configuration()
        manifest = conf.SystemManifest()
        if manifest:
            self.__version = manifest.Version()
        else:
            # No manifest available -- fall back to the version file.
            with open(VERSION_FILE) as fd:
                self.__version = fd.read().strip()

        return self.__version

    @accepts()
    @returns(float, float, float)
    def load_avg(self):
        """Return the 1/5/15 minute load averages."""
        return os.getloadavg()

    @accepts()
    @returns(h.object(properties={
        'cpu_model': str,
        'cpu_cores': int,
        'memory_size': long,
    }))
    def hardware(self):
        """Return CPU model, core count and physical memory size via sysctl."""
        info = {}
        info['cpu_model'] = get_sysctl("hw.model")
        info['cpu_cores'] = get_sysctl("hw.ncpu")
        info['memory_size'] = get_sysctl("hw.physmem")
        return info

    @accepts()
    @returns(h.object(properties={
        'system_time': str,
        'boot_time': str,
        'uptime': str,
        'timezone': str,
    }))
    def time(self):
        """Return current time, boot time, uptime in seconds and timezone."""
        local_tz = tz.tzlocal()
        booted = datetime.fromtimestamp(psutil.BOOT_TIME, tz=local_tz)
        return {
            'system_time': datetime.now(tz=local_tz).isoformat(),
            'boot_time': booted.isoformat(),
            'uptime': (datetime.now(tz=local_tz) - booted).total_seconds(),
            'timezone': time.tzname[time.daylight],
        }
Пример #7
0
class SystemGeneralProvider(Provider):

    @accepts()
    @returns(h.ref('system-general'))
    def get_config(self):
        """Return the general system settings from the configuration store."""
        fetch = self.configstore.get
        return {
            'hostname': fetch('system.hostname'),
            'language': fetch('system.language'),
            'timezone': fetch('system.timezone'),
            'syslog_server': fetch('system.syslog_server'),
            'console_keymap': fetch('system.console.keymap')
        }

    @accepts()
    @returns(h.array(h.array(str)))
    def keymaps(self):
        """Return (name, description) pairs for available console keymaps."""
        if not os.path.exists(KEYMAPS_INDEX):
            return []

        with open(KEYMAPS_INDEX, 'r') as f:
            index = f.read()
        # Pick out English entries of the form "<name>.kbd:en:<description>".
        pattern = r'^(?P<name>[^#\s]+?)\.kbd:en:(?P<desc>.+)$'
        return [(name, desc)
                for name, desc in re.findall(pattern, index, re.M)]

    @accepts()
    @returns(h.array(str))
    def timezones(self):
        """Walk the zoneinfo tree and return timezone names relative to it."""
        prefix = ZONEINFO_DIR + '/'
        zones = []
        for root, _, files in os.walk(ZONEINFO_DIR):
            for entry in files:
                if entry == 'zone.tab':
                    # Index file, not a timezone.
                    continue
                zones.append(os.path.join(root, entry).replace(prefix, ''))
        return zones
Пример #8
0
class NetworkProvider(Provider):
    @returns(h.ref('network-config'))
    def get_global_config(self):
        """Return the global network configuration node."""
        return ConfigNode('network', self.configstore)

    @returns(h.array(str))
    def get_my_ips(self):
        """Collect non-loopback, non-link-layer addresses of all interfaces."""
        interfaces = self.dispatcher.call_sync(
            'networkd.configuration.query_interfaces')
        addresses = []
        for name, iface in interfaces.iteritems():
            if 'LOOPBACK' in iface['flags']:
                # Skip loopback interfaces entirely.
                continue
            for alias in iface['aliases']:
                if alias['address'] and alias['family'] != 'LINK':
                    addresses.append(alias['address'])
        return addresses
Пример #9
0
class SharesProvider(Provider):
    @query('share')
    def query(self, filter=None, params=None):
        """Query configured shares with optional filter/params."""
        return self.datastore.query('shares', *(filter or []), **(params or {}))

    @description("Returns list of supported sharing providers")
    @returns(h.array(str))
    def supported_types(self):
        """List the RPC method names of all loaded sharing plugins."""
        return [
            plugin.metadata['method']
            for plugin in self.dispatcher.plugins.values()
            if plugin.metadata and plugin.metadata.get('type') == 'sharing'
        ]

    @description("Returns list of clients connected to particular share")
    def get_connected_clients(self, share_name):
        """Delegate to the share-type-specific connected-clients RPC call."""
        share = self.datastore.get_by_id('shares', share_name)
        if not share:
            raise RpcException(errno.ENOENT, 'Share not found')

        return self.dispatcher.call_sync(
            'shares.{0}.get_connected_clients'.format(share['type']),
            share_name
        )
Пример #10
0
class UpdateProvider(Provider):
    """RPC interface to the update subsystem's cached state and configuration."""

    def _cached_or_busy(self, key, busy_message):
        """Return the cached value for *key*, or raise EBUSY if invalidated.

        Shared helper for the cache-reading RPC methods. Preserves the
        original semantics: a non-None cached value is returned; a key
        that is valid but unset returns None; an invalidated key raises
        RpcException(EBUSY) with *busy_message*.
        """
        value = update_cache.get(key, timeout=1)
        if value is not None:
            return value
        if update_cache.is_valid(key):
            return value  # valid but unset -> None
        raise RpcException(errno.EBUSY, busy_message)

    @accepts()
    @returns(str)
    def is_update_available(self):
        """Return the cached update-availability flag."""
        return self._cached_or_busy(
            'updateAvailable',
            ('Update Availability flag is invalidated, an Update Check'
             ' might be underway. Try again in some time.'))

    @accepts()
    @returns(h.array(str))
    def obtain_changelog(self):
        """Return the cached changelog list."""
        return self._cached_or_busy(
            'changelog',
            ('Changelog list is invalidated, an Update Check '
             'might be underway. Try again in some time.'))

    @accepts()
    @returns(h.array(h.ref('update-ops')))
    def get_update_ops(self):
        """Return the cached list of pending update operations."""
        return self._cached_or_busy(
            'updateOperations',
            ('Update Operations Dict is invalidated, an Update Check '
             'might be underway. Try again in some time.'))

    @accepts()
    @returns(h.any_of(
        h.ref('update-info'),
        None,
    ))
    def update_info(self):
        """Return full details of a pending update, or None if none available.

        Raises RpcException(EBUSY) if the availability flag is invalidated.
        """
        if not update_cache.is_valid('updateAvailable'):
            raise RpcException(
                errno.EBUSY,
                ('Update Availability flag is invalidated, an Update Check'
                 ' might be underway. Try again in some time.'))
        if not update_cache.get('updateAvailable', timeout=1):
            return None
        return {
            'changelog': update_cache.get('changelog', timeout=1),
            'notes': update_cache.get('updateNotes', timeout=1),
            'notice': update_cache.get('updateNotice', timeout=1),
            'operations': update_cache.get('updateOperations', timeout=1),
        }

    @returns(h.array(h.ref('update-train')))
    def trains(self):
        """List known update trains, marking the currently selected one."""
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()
        trains = conf.AvailableTrains() or {}

        seltrain = self.dispatcher.configstore.get('update.train')

        data = []
        for name in trains.keys():
            # Prefer the already-loaded train object when present.
            if name in conf._trains:
                train = conf._trains.get(name)
            else:
                train = Train.Train(name)
            data.append({
                'name': train.Name(),
                'description': train.Description(),
                'sequence': train.LastSequence(),
                'current': name == seltrain,
            })
        return data

    @accepts()
    @returns(str)
    def get_current_train(self):
        """Return the name of the currently active update train."""
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()
        return conf.CurrentTrain()

    @accepts()
    @returns(h.ref('update'))
    def get_config(self):
        """Return the update subsystem configuration."""
        return {
            'train': self.dispatcher.configstore.get('update.train'),
            'check_auto': self.dispatcher.configstore.get('update.check_auto'),
            'update_server': Configuration.Configuration().UpdateServerURL(),
        }
Пример #11
0
    def run(self, name):
        """Destroy ZFS pool *name*; wrap libzfs errors in TaskException."""
        try:
            zfs = libzfs.ZFS()
            zfs.destroy(name)
        except libzfs.ZFSException as err:
            # 'as' form works on Python 2.6+ and 3; the comma form is
            # Python-2-only.
            raise TaskException(errno.EFAULT, str(err))


@accepts(
    str,
    h.any_of(
        h.ref('zfs-topology'),
        None
    ),
    h.any_of(
        h.array(h.ref('zfs-vdev-extension')),
        None
    )
)
class ZpoolExtendTask(ZpoolBaseTask):
    # Task that extends an existing zpool with new vdevs and/or vdev
    # extensions. NOTE(review): the body of run() continues beyond this
    # excerpt.

    def __init__(self, dispatcher, datastore):
        super(ZpoolExtendTask, self).__init__(dispatcher, datastore)
        # Name of the pool being extended; recorded when run() starts.
        self.pool = None
        self.started = False

    def run(self, pool, new_vdevs, updated_vdevs):
        try:
            self.pool = pool
            zfs = libzfs.ZFS()
            # Rebind 'pool' from the name string to the libzfs pool object.
            pool = zfs.get(pool)
Пример #12
0
                ('severity', '=', alert['severity']),
            )
            if afilter:
                emitters = afilter['emitters']

        # If there are no filters configured, set default emitters
        if emitters is None:
            if alert['severity'] == 'CRITICAL':
                emitters = ['UI', 'EMAIL']
            else:
                emitters = ['UI']

        if 'UI' in emitters:
            self.datastore.insert('alerts', alert)

    @returns(h.array(str))
    def get_registered_alerts(self):
        """Return the module-level registry of registered alert types."""
        # NOTE(review): this returns the registry mapping itself, while the
        # @returns schema declares an array of strings -- confirm intended.
        return registered_alerts

    @accepts(str)
    def register_alert(self, name, verbose_name=None):
        """Add *name* to the alert registry unless it is already present."""
        if name in registered_alerts:
            # Already registered; leave the existing entry untouched.
            return
        registered_alerts[name] = {
            'name': name,
            'verbose_name': verbose_name,
        }


@description('Provides access to the alerts filters')
class AlertsFiltersProvider(Provider):
Пример #13
0
class FilesystemProvider(Provider):
    @description("Lists contents of given directory")
    @accepts(str)
    @returns(h.array(h.ref('directory')))
    def list_dir(self, path):
        """Return name/type/size/mtime entries for each item in *path*.

        Entries that vanish or cannot be stat'ed are skipped silently.
        Raises RpcException(ENOENT) when *path* is not a directory.
        """
        if not os.path.isdir(path):
            raise RpcException(errno.ENOENT,
                               'Path {0} is not a directory'.format(path))

        result = []
        for entry in os.listdir(path):
            try:
                st = os.stat(os.path.join(path, entry))
            except OSError:
                # Race: the entry vanished or is unreadable -- skip it.
                continue

            result.append({
                'name': entry,
                'type': get_type(st),
                'size': st.st_size,
                'modified': st.st_mtime
            })

        return result

    @accepts(str)
    @returns(h.ref('stat'))
    def stat(self, path):
        """Return a stat-like dict for *path* with a permission breakdown.

        Raises RpcException carrying the underlying errno on failure.
        Permission entries are the raw mode bits (non-zero means granted).
        """
        try:
            st = os.stat(path)
        except OSError as err:
            # 'as' form works on Python 2.6+ and 3; the comma form is
            # Python-2-only.
            raise RpcException(err.errno, str(err))

        return {
            'path': path,
            'type': get_type(st),
            'atime': st.st_atime,
            'mtime': st.st_mtime,
            'ctime': st.st_ctime,
            'uid': st.st_uid,
            'gid': st.st_gid,
            'permissions': {
                'user': {
                    'read': st.st_mode & stat.S_IRUSR,
                    'write': st.st_mode & stat.S_IWUSR,
                    'execute': st.st_mode & stat.S_IXUSR
                },
                'group': {
                    'read': st.st_mode & stat.S_IRGRP,
                    'write': st.st_mode & stat.S_IWGRP,
                    'execute': st.st_mode & stat.S_IXGRP
                },
                'others': {
                    'read': st.st_mode & stat.S_IROTH,
                    'write': st.st_mode & stat.S_IWOTH,
                    'execute': st.st_mode & stat.S_IXOTH
                },
            }
        }
Пример #14
0
                })

            return vol

        return self.datastore.query('volumes',
                                    *(filter or []),
                                    callback=extend,
                                    **(params or {}))

    @description("Finds volumes available for import")
    @accepts()
    @returns(
        h.array(
            h.object(
                properties={
                    'id': str,
                    'name': str,
                    'topology': h.ref('zfs-topology'),
                    'status': str
                })))
    def find(self):
        """Discover importable ZFS pools and describe them as volumes."""
        # NOTE(review): this method continues beyond the visible excerpt.
        result = []
        for pool in self.dispatcher.call_sync('zfs.pool.find'):
            topology = pool['groups']
            for vdev, _ in iterate_vdevs(topology):
                try:
                    # Map partition paths back to whole-disk paths when
                    # possible.
                    vdev['path'] = self.dispatcher.call_sync(
                        'disks.partition_to_disk', vdev['path'])
                except RpcException:
                    # Best effort: keep the original partition path.
                    pass

            if self.datastore.exists('volumes', ('id', '=', pool['guid'])):
Пример #15
0
        except libzfs.ZFSException, err:
            raise TaskException(errno.EFAULT, str(err))


@accepts(str)
class ZpoolDestroyTask(ZpoolBaseTask):
    """Task that destroys the named ZFS pool."""

    def run(self, name):
        """Destroy pool *name*; wrap libzfs errors in TaskException(EFAULT)."""
        try:
            zfs = libzfs.ZFS()
            zfs.destroy(name)
        except libzfs.ZFSException as err:
            # 'as' form works on Python 2.6+ and 3; the comma form is
            # Python-2-only.
            raise TaskException(errno.EFAULT, str(err))


@accepts(str, h.any_of(h.ref('zfs-topology'), None),
         h.any_of(h.array(h.ref('zfs-vdev-extension')), None))
class ZpoolExtendTask(ZpoolBaseTask):
    # Task that extends an existing zpool with new vdevs and/or vdev
    # extensions. NOTE(review): run() continues beyond this excerpt.

    def __init__(self, dispatcher, datastore):
        super(ZpoolExtendTask, self).__init__(dispatcher, datastore)
        # Name of the pool being extended; recorded when run() starts.
        self.pool = None
        self.started = False

    def run(self, pool, new_vdevs, updated_vdevs):
        try:
            self.pool = pool
            zfs = libzfs.ZFS()
            # Rebind 'pool' from the name string to the libzfs pool object.
            pool = zfs.get(pool)

            if new_vdevs:
                # Convert the declarative topology into a vdev tree and
                # attach it to the pool.
                nvroot = convert_topology(zfs, new_vdevs)
                pool.attach_vdevs(nvroot)
Пример #16
0
                    'upgraded': is_upgraded(config),
                    'scan': config['scan'],
                    'properties': config['properties'],
                    'datasets': map(extend_dataset, flatten_datasets(config['root_dataset']))
                })

            return vol

        return self.datastore.query('volumes', *(filter or []), callback=extend, **(params or {}))

    @description("Finds volumes available for import")
    @accepts()
    @returns(h.array(
        h.object(properties={
            'id': str,
            'name': str,
            'topology': h.ref('zfs-topology'),
            'status': str
        })
    ))
    def find(self):
        """Discover importable ZFS pools and describe them as volumes."""
        # NOTE(review): this method continues beyond the visible excerpt.
        result = []
        for pool in self.dispatcher.call_sync('zfs.pool.find'):
            topology = pool['groups']
            for vdev, _ in iterate_vdevs(topology):
                try:
                    # Map partition paths back to whole-disk device paths.
                    vdev['path'] = self.dispatcher.call_sync(
                        'disks.partition_to_disk',
                        vdev['path']
                    )
                except RpcException:
                    # Best effort: keep the partition path on failure.
                    pass
Пример #17
0
class DeviceInfoPlugin(Provider):
    """Enumerates hardware devices by class (disk, network, cpu)."""

    @description("Returns list of available device classes")
    @returns(h.array(str))
    def get_classes(self):
        """Return the device classes this provider can enumerate."""
        return ["disk", "network", "cpu"]

    @description("Returns list of devices from given class")
    @accepts(str)
    @returns(
        h.any_of(h.ref('disk-device'), h.ref('network-device'),
                 h.ref('cpu-device')))
    def get_devices(self, dev_class):
        """Dispatch to the matching _get_class_* helper, or None if unknown."""
        method = "_get_class_{0}".format(dev_class)
        if hasattr(self, method):
            return getattr(self, method)()

        return None

    def _get_class_disk(self):
        # One entry per GEOM DISK provider.
        result = []
        geom.scan()
        for child in geom.class_by_name('DISK').geoms:
            result.append({
                "path": os.path.join("/dev", child.name),
                "name": child.name,
                "mediasize": child.provider.mediasize,
                "description": child.provider.config['descr']
            })

        return result

    def _get_class_multipath(self):
        # One entry per GEOM MULTIPATH geom, listing member consumers.
        result = []
        geom.scan()
        cls = geom.class_by_name('MULTIPATH')
        if not cls:
            return []

        for child in cls.geoms:
            result.append({
                "path": os.path.join("/dev", child.name),
                "name": child.name,
                "mediasize": child.provider.mediasize,
                "members": [c.provider.name for c in child.consumers]
            })

        return result

    def _get_class_network(self):
        # Non-loopback interfaces with their sysctl description strings.
        result = []
        for i in netif.list_interfaces().keys():
            if i.startswith('lo'):
                continue

            # Raw strings keep the regex free of invalid escape sequences
            # ('\w' and '\1'/'\2' backreferences); bytes are identical.
            desc = get_sysctl(re.sub(r'(\w+)([0-9]+)', r'dev.\1.\2.%desc', i))
            result.append({'name': i, 'description': desc})

        return result

    def _get_class_cpu(self):
        # Not implemented yet; returns None.
        pass
Пример #18
0
                ('severity', '=', alert['severity']),
            )
            if afilter:
                emitters = afilter['emitters']

        # If there are no filters configured, set default emitters
        if emitters is None:
            if alert['severity'] == 'CRITICAL':
                emitters = ['UI', 'EMAIL']
            else:
                emitters = ['UI']

        if 'UI' in emitters:
            self.datastore.insert('alerts', alert)

    @returns(h.array(str))
    def get_registered_alerts(self):
        """Return the module-level registry of known alert types."""
        # NOTE(review): the registry mapping is returned as-is, though the
        # @returns schema declares an array of strings -- verify intent.
        return registered_alerts

    @accepts(str)
    def register_alert(self, name, verbose_name=None):
        """Record alert type *name* in the registry if not yet known."""
        already_known = name in registered_alerts
        if not already_known:
            registered_alerts[name] = {
                'name': name,
                'verbose_name': verbose_name,
            }


@description('Provides access to the alerts filters')
class AlertsFiltersProvider(Provider):
    @query('alert-filter')