Example #1
0
class StatProvider(Provider):
    """RPC provider exposing statd statistics to dispatcher clients."""

    @query('Statistic')
    @generator
    def query(self, filter=None, params=None):
        """Stream the current statd statistics through the query DSL."""
        current = self.dispatcher.call_sync('statd.output.get_current_state')
        query_filter = filter or []
        query_params = params or {}
        return q.query(current, *query_filter, stream=True, **query_params)

    @returns(h.array(str))
    @generator
    def get_data_sources(self):
        """Return the flat list of statd data source names."""
        return self.dispatcher.call_sync('statd.output.get_data_sources')

    def get_data_sources_tree(self):
        """Return statd data sources organized as a tree."""
        return self.dispatcher.call_sync('statd.output.get_data_sources_tree')

    @accepts(h.one_of(str, h.array(str)), h.ref('GetStatsParams'))
    @returns(h.ref('GetStatsResult'))
    def get_stats(self, data_source, params):
        """Fetch stats for one or more data sources, materialized as a list."""
        samples = self.dispatcher.call_sync(
            'statd.output.get_stats', data_source, params)
        return {'data': list(samples)}

    def normalize(self, name, value):
        """Delegate to the module-level normalize() helper."""
        return normalize(name, value)
Example #2
0
class UPSProvider(Provider):
    """RPC provider exposing UPS (NUT) service configuration and helpers."""

    @private
    @accepts()
    @returns(h.ref('ServiceUps'))
    def get_config(self):
        """Return the persisted service.ups configuration subtree."""
        return ConfigNode('service.ups', self.configstore).__getstate__()

    @accepts()
    @returns(h.array(h.array(str)))
    def drivers(self):
        """Parse NUT's driver.list and return the known UPS drivers.

        Each entry carries 'driver_name' and a 'description' built from
        the hardware text preceding the driver name in the list file.
        Returns an empty list when the file is absent.
        """
        driver_list = '/etc/local/nut/driver.list'
        if not os.path.exists(driver_list):
            return []
        drivers = []
        with open(driver_list, 'rb') as f:
            d = f.read()
        r = io.StringIO()
        # Collapse runs of spaces/tabs so the csv reader can split on
        # single spaces.
        for line in re.sub(r'[ \t]+', ' ', d.decode('utf-8'),
                           flags=re.M).split('\n'):
            r.write(line.strip() + '\n')
        r.seek(0)
        reader = csv.reader(r, delimiter=' ', quotechar='"')
        for row in reader:
            if len(row) == 0 or row[0].startswith('#'):
                continue
            # A trailing "# comment" field pushes the driver name back
            # two positions.
            if row[-2] == '#':
                last = -3
            else:
                last = -1
            if row[last].find(' (experimental)') != -1:
                row[last] = row[last].replace(' (experimental)', '').strip()
            drivers.append({
                'driver_name':
                row[last],
                'description':
                '{0} ({1})'.format(' '.join(row[0:last]), row[last])
            })
        return drivers

    @accepts()
    @returns(h.array(h.array(str)))
    def get_usb_devices(self):
        """List attached USB devices as {'device', 'description'} dicts.

        Parses `usbconfig` output; raises TaskException(EBUSY) when the
        subprocess fails.
        """
        usb_devices_list = []
        try:
            usbconfig_output = system('usbconfig')[0]
            if not usbconfig_output.startswith('No device match'):
                for device in usbconfig_output.rstrip().split('\n'):
                    # "ugen0.1: <desc> ..." -> /dev/ugen0.1 (drop the ':')
                    device_path = os.path.join('/dev', device.split()[0][:-1])
                    device_description = re.findall(r'<.*?>',
                                                    device)[0].strip('><')
                    usb_devices_list.append({
                        'device': device_path,
                        'description': device_description
                    })

        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)

        return usb_devices_list
Example #3
0
class SupportProvider(Provider):
    """RPC provider for querying support-ticket categories via the proxy."""

    @accepts(str, Password)
    @returns(h.array(str))
    def categories(self, user, password):
        """Return ticket categories available to an authenticated user.

        :param user: support-portal user name.
        :param password: support-portal password (Password wrapper).
        :raises RpcException: on decode failure, connection error or timeout.
        """
        return self._fetch_categories('categories', {
            'user': user,
            'password': unpassword(password),
            'project': REDMINE_PROJECT_NAME,
        })

    @returns(h.array(str))
    def categories_no_auth(self):
        """Return ticket categories without authentication."""
        return self._fetch_categories('categoriesnoauth',
                                      {'project': REDMINE_PROJECT_NAME})

    def _fetch_categories(self, endpoint, payload):
        """POST *payload* to the proxy *endpoint* and return the decoded list.

        Shared implementation for categories()/categories_no_auth(); maps
        transport and decode failures onto RpcException.
        """
        version = self.dispatcher.call_sync('system.info.version')
        sw_name = version.split('-')[0].lower()
        try:
            r = requests.post(
                'https://%s/%s/api/v1.0/%s' % (PROXY_ADDRESS, sw_name, endpoint),
                data=json.dumps(payload),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError as e:
            # BUG FIX: the raw body lives on the response object, not on the
            # decode exception (JSONDecodeError has no .text attribute);
            # r is always bound here since r.json() was reached.
            logger.debug('Failed to decode ticket attachment response: %s', r.text)
            raise RpcException(errno.EINVAL, 'Failed to decode ticket response')
        except requests.ConnectionError as e:
            raise RpcException(errno.ENOTCONN, 'Connection failed: {0}'.format(str(e)))
        except requests.Timeout as e:
            raise RpcException(errno.ETIMEDOUT, 'Connection timed out: {0}'.format(str(e)))

        if 'error' in data:
            raise RpcException(errno.EINVAL, data['message'])

        return data
class SystemGeneralProvider(Provider):
    """RPC provider for general system settings (hostname, locale, etc.)."""

    @accepts()
    @returns(h.ref('SystemGeneral'))
    def get_config(self):
        """Assemble the general system configuration from the configstore."""
        get = self.configstore.get
        return {
            'hostname': get('system.hostname'),
            'description': get('system.description'),
            'tags': get('system.tags'),
            'language': get('system.language'),
            'timezone': get('system.timezone'),
            'syslog_server': get('system.syslog_server'),
            'console_keymap': get('system.console.keymap')
        }

    @accepts()
    @returns(h.array(h.array(str)))
    def keymaps(self):
        """Return (name, description) pairs for every available keymap."""
        if not os.path.exists(KEYMAPS_INDEX):
            return []

        with open(KEYMAPS_INDEX, 'r', encoding='utf-8', errors='ignore') as f:
            index_text = f.read()
        # Index lines look like "<name>.kbd:en:<description>".
        pattern = r'^(?P<name>[^#\s]+?)\.kbd:en:(?P<desc>.+)$'
        return [(name, desc)
                for name, desc in re.findall(pattern, index_text, re.M)]

    @accepts()
    @returns(h.array(str))
    def timezones(self):
        """Return the sorted list of timezone names under ZONEINFO_DIR."""
        skipped = ('zone.tab', 'posixrules')
        zones = [
            os.path.join(root, name).replace(ZONEINFO_DIR + '/', '')
            for root, _, names in os.walk(ZONEINFO_DIR)
            for name in names
            if name not in skipped
        ]
        return sorted(zones)

    @private
    @accepts(str, str)
    @returns(str)
    def cowsay(self, line, cow_file='default'):
        """Render *line* through cowsay, optionally with a custom cow file."""
        args = ['/usr/local/bin/cowsay', '-s', line]
        if cow_file != 'default' and os.path.exists(cow_file):
            args = ['/usr/local/bin/cowsay', '-f', cow_file, '-s', line]
        return system(*args)
Example #5
0
class PeerProvider(Provider):
    """RPC provider aggregating peers from every registered peering plugin."""

    @query('Peer')
    @generator
    def query(self, filter=None, params=None):
        """Stream peers of all types, each annotated with liveness status."""
        def annotated_peers():
            for peer_type in self.peer_types():
                peers = self.dispatcher.call_sync(
                    f'peer.{peer_type}.query', [], {'exclude': 'status'})
                for peer in peers:
                    # Fall back to a fresh UNKNOWN status per peer.
                    peer['status'] = peers_status.get(
                        peer['id'], {'state': 'UNKNOWN', 'rtt': None})
                    yield peer

        return q.query(annotated_peers(), *(filter or []), stream=True,
                       **(params or {}))

    @returns(h.array(str))
    def peer_types(self):
        """List the subtype of every plugin whose metadata type is 'peering'."""
        return [
            p.metadata.get('subtype')
            for p in self.dispatcher.plugins.values()
            if p.metadata and p.metadata.get('type') == 'peering'
        ]
Example #6
0
class SwapProvider(Provider):
    """RPC provider reporting swap mirror state."""

    @accepts()
    @returns(h.array(h.ref('SwapMirror')))
    @description("Returns information about swap mirrors present in the system"
                 )
    def info(self):
        """Return every swap mirror known to the system as a list."""
        mirrors = get_swap_info(self.dispatcher)
        return list(mirrors.values())
Example #7
0
class NetworkProvider(Provider):
    """RPC provider for global network configuration and local addresses."""

    @returns(h.ref('network-config'))
    def get_config(self):
        """Return the 'network' config subtree plus live gateway/DNS state."""
        call = self.dispatcher.call_sync
        state = ConfigNode('network', self.configstore).__getstate__()
        state['gateway'] = call('networkd.configuration.get_default_routes')
        state['dns'] = call('networkd.configuration.get_dns_config')
        return state

    @returns(h.array(str))
    def get_my_ips(self):
        """Return unique non-loopback addresses of interfaces that are up."""
        addresses = []
        interfaces = self.dispatcher.call_sync(
            'networkd.configuration.query_interfaces')
        # The management interface is excluded on purpose.
        interfaces.pop('mgmt0', None)
        for details in interfaces.values():
            flags = details['flags']
            usable = ('LOOPBACK' not in flags and 'UP' in flags
                      and details['link_state'] == 'LINK_STATE_UP')
            if not usable:
                continue

            for alias in details['aliases']:
                if alias['address'] and alias['type'] != 'LINK':
                    addresses.append(alias['address'])

        return list(set(addresses))
Example #8
0
class IPMIProvider(Provider):
    """RPC provider reading IPMI LAN configuration via ipmitool."""

    @accepts()
    @returns(bool)
    def is_ipmi_loaded(self):
        """Report whether the IPMI character device is present."""
        return os.path.exists('/dev/ipmi0')

    @accepts()
    @returns(h.array(int))
    def channels(self):
        """Return the module-level list of IPMI channels."""
        return channels

    @query('ipmi')
    @generator
    def query(self, filter=None, params=None):
        """Stream per-channel IPMI LAN configuration records."""
        if not self.is_ipmi_loaded():
            raise RpcException(errno.ENXIO, 'The IPMI device could not be found')

        records = []
        for channel in self.channels():
            try:
                out, err = system('/usr/local/bin/ipmitool', 'lan', 'print', str(channel))
            except SubprocessException as e:
                raise RpcException(errno.EFAULT, 'Cannot receive IPMI configuration: {0}'.format(e.err.strip()))

            # Parse "key : value" pairs, then keep only known attributes.
            attrs = {k.strip(): v.strip() for k, v in RE_ATTRS.findall(out)}
            record = {IPMI_ATTR_MAP[k]: v for k, v in attrs.items() if k in IPMI_ATTR_MAP}
            record['id'] = channel
            # 'Disabled' means no VLAN; normalize missing values to None too.
            if record.get('vlan_id') == 'Disabled':
                record['vlan_id'] = None
            else:
                record['vlan_id'] = record.get('vlan_id')
            record['dhcp'] = record.get('dhcp') == 'DHCP Address'
            records.append(record)

        return q.query(records, *(filter or []), stream=True, **(params or {}))
class SystemInfoProvider(Provider):
    """RPC provider exposing basic host information (version, hardware)."""

    def __init__(self):
        # Lazily-populated version string cache; see version().
        self.__version = None

    @accepts()
    @returns(h.array(str))
    def uname_full(self):
        """Return the full uname tuple for this host."""
        return os.uname()

    @accepts()
    @returns(str)
    @description("Return the full version string, e.g. FreeNAS-8.1-r7794-amd64.")
    def version(self):
        """Return (and cache) the system version string."""
        if self.__version is None:
            # See #9113
            conf = Configuration.Configuration()
            manifest = conf.SystemManifest()
            if manifest:
                self.__version = manifest.Version()
            else:
                # No manifest: fall back to the on-disk version file.
                with open(VERSION_FILE) as fd:
                    self.__version = fd.read().strip()

        return self.__version

    @accepts()
    @returns({'type': 'array', 'items': {'type': 'number'}, 'maxItems': 3, 'minItems': 3})
    def load_avg(self):
        """Return the 1/5/15 minute load averages."""
        return list(os.getloadavg())

    @accepts()
    @returns(h.object(properties={
        'cpu_model': str,
        'cpu_cores': int,
        'cpu_clockrate': int,
        'memory_size': int,
        'vm_guest': h.one_of(str, None)
    }))
    def hardware(self):
        """Collect CPU/memory facts via sysctl; vm_guest is None on bare metal."""
        guest = get_sysctl("kern.vm_guest")
        if guest == 'none':
            guest = None
        return {
            'cpu_model': get_sysctl("hw.model"),
            'cpu_cores': get_sysctl("hw.ncpu"),
            'cpu_clockrate': get_sysctl("hw.clockrate"),
            'memory_size': get_sysctl("hw.physmem"),
            'vm_guest': guest
        }

    @accepts()
    @returns(str)
    def host_uuid(self):
        """Return kern.hostuuid with its trailing character stripped."""
        return get_sysctl("kern.hostuuid")[:-1]
Example #10
0
class PeerProvider(Provider):
    """RPC provider for peers persisted in the datastore."""

    @query('Peer')
    @generator
    def query(self, filter=None, params=None):
        """Stream stored peers, attaching the cached liveness status."""
        def attach_status(peer):
            fallback = {'state': 'UNKNOWN', 'rtt': None}
            peer['status'] = peers_status.get(peer['id'], fallback)
            return peer

        stream = self.datastore.query_stream('peers', callback=attach_status)
        return q.query(stream, *(filter or []), stream=True, **(params or {}))

    @returns(h.array(str))
    def peer_types(self):
        """List the subtype of every plugin whose metadata type is 'peering'."""
        return [
            plugin.metadata.get('subtype')
            for plugin in self.dispatcher.plugins.values()
            if plugin.metadata and plugin.metadata.get('type') == 'peering'
        ]
Example #11
0
import errno
import logging
import os
import shutil
import ipfsapi
from requests.exceptions import ConnectionError
from datastore.config import ConfigNode
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns, private
from task import Task, Provider, TaskException, ValidationException, TaskDescription

logger = logging.getLogger('IPFSPlugin')

# Declarative table of the IPFS task wrappers provided by this plugin.
# For each task we record its early description, the accepted argument
# schema, and the argument names — presumably forwarded to the matching
# ipfsapi client call; confirm against the task implementation.
ipfs_tasks = {
    'add': {
        'early_describe': 'Calling IPFS add',
        'accepts': (h.one_of(str, h.array(str)), bool),
        'args': ('files', 'recursive')
    },
    'get': {
        'early_describe': 'Calling IPFS get',
        'accepts': (str, h.one_of(str, None)),
        'args': ('multihash', 'filepath')
    },
    'add_json': {
        'early_describe': 'Calling IPFS add json',
        'accepts': (h.object(),),
        'args': ('json_obj',)
    }

}
Example #12
0
class UPSProvider(Provider):
    """RPC provider for the UPS (NUT) service: config, drivers, rc control."""

    @private
    @accepts()
    @returns(h.ref('service-ups'))
    def get_config(self):
        """Return the persisted service.ups configuration subtree."""
        return ConfigNode('service.ups', self.configstore).__getstate__()

    @accepts()
    @returns(h.array(h.array(str)))
    def drivers(self):
        """Parse NUT's driver.list and return the known UPS drivers.

        Each entry carries 'driver_name' and a 'description' built from
        the hardware text preceding the driver name in the list file.
        Returns an empty list when the file is absent.
        """
        driver_list = '/usr/local/libexec/nut/driver.list'
        if not os.path.exists(driver_list):
            return []
        drivers = []
        with open(driver_list, 'rb') as f:
            d = f.read()
        r = io.StringIO()
        # Collapse runs of spaces/tabs so the csv reader can split on
        # single spaces.
        for line in re.sub(r'[ \t]+', ' ', d.decode('utf-8'), flags=re.M).split('\n'):
            r.write(line.strip() + '\n')
        r.seek(0)
        reader = csv.reader(r, delimiter=' ', quotechar='"')
        for row in reader:
            if len(row) == 0 or row[0].startswith('#'):
                continue
            # A trailing "# comment" field pushes the driver name back
            # two positions.
            if row[-2] == '#':
                last = -3
            else:
                last = -1
            if row[last].find(' (experimental)') != -1:
                row[last] = row[last].replace(' (experimental)', '').strip()
            drivers.append({'driver_name': row[last], 'description': '{0} ({1})'.format(
                ' '.join(row[0:last]), row[last]
            )})
        return drivers

    @accepts()
    @returns(h.array(h.array(str)))
    def get_usb_devices(self):
        """List attached USB devices parsed from `usbconfig` output."""
        usb_devices_list = []
        try:
            usbconfig_output = system('usbconfig')[0]
            if not usbconfig_output.startswith('No device match'):
                for device in usbconfig_output.rstrip().split('\n'):
                    # "ugen0.1: <desc> ..." -> /dev/ugen0.1 (drop the ':')
                    device_path = os.path.join('/dev', device.split()[0][:-1])
                    device_description = re.findall(r'<.*?>', device)[0].strip('><')
                    usb_devices_list.append({'device': device_path, 'description': device_description})

        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)

        return usb_devices_list

    @private
    def service_start(self):
        """Start the NUT rc scripts; the 'nut' daemon only in MASTER mode."""
        ups = self.get_config()
        if ups['mode'] == 'MASTER':
            rc_scripts = ['nut']
        else:
            rc_scripts = []
        rc_scripts.extend(['nut_upslog', 'nut_upsmon'])

        try:
            for i in rc_scripts:
                system("/usr/sbin/service", i, 'onestart')
        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)

    @private
    def service_status(self):
        """Query status of the NUT rc scripts; raises EBUSY on failure."""
        ups = self.get_config()
        if ups['mode'] == 'MASTER':
            rc_scripts = ['nut']
        else:
            rc_scripts = []
        rc_scripts.extend(['nut_upslog', 'nut_upsmon'])

        try:
            for i in rc_scripts:
                system("/usr/sbin/service", i, 'onestatus')
        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)

    @private
    def service_stop(self):
        """Stop the NUT rc scripts; the 'nut' daemon only in MASTER mode."""
        ups = self.get_config()
        rc_scripts = ['nut_upslog', 'nut_upsmon']
        if ups['mode'] == 'MASTER':
            rc_scripts.append('nut')

        try:
            for i in rc_scripts:
                system("/usr/sbin/service", i, 'onestop')
        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)

    @private
    def service_restart(self):
        """Restart NUT, keeping upsmon down while the daemons bounce."""
        ups = self.get_config()
        # Stop monitor so it wont trigger signals when nut restarts
        verbs = [
            ('nut_upsmon', 'stop'),
            ('nut_upslog', 'restart'),
        ]
        if ups['mode'] == 'MASTER':
            verbs.append(('nut', 'restart'))
        verbs.append(('nut_upsmon', 'start'))

        try:
            for svc, verb in verbs:
                system("/usr/sbin/service", svc, 'one' + verb)
        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)
class DatastoreProvider(Provider):
    """RPC provider that aggregates VM datastores across driver plugins."""

    @query('VmDatastore')
    @generator
    def query(self, filter=None, params=None):
        """Stream driver-discovered datastores, then persisted ones."""
        drivers = self.supported_drivers()

        def extend(obj):
            # Attach driver capabilities and a lazily-evaluated state lookup.
            obj['capabilities'] = drivers[obj['type']]
            obj['state'] = lazy(self.dispatcher.call_sync,
                                f'vm.datastore.{obj["type"]}.get_state',
                                obj['id'])
            return obj

        def doit():
            # Discovery failures of individual drivers are deliberately
            # ignored so one broken driver cannot break the whole query.
            for i in drivers:
                with contextlib.suppress(Exception):
                    for d in self.dispatcher.call_sync(
                            'vm.datastore.{0}.discover'.format(i)):
                        yield extend(d)

            yield from self.datastore.query_stream('vm.datastores',
                                                   callback=extend)

        return q.query(doit(), *(filter or []), **(params or {}))

    @description("Returns list of supported datastore drivers")
    def supported_drivers(self):
        """Map driver name -> capability flags for every datastore plugin."""
        result = {}
        for p in list(self.dispatcher.plugins.values()):
            if p.metadata and p.metadata.get('type') == 'datastore':
                result[p.metadata['driver']] = {
                    'clones': p.metadata['clones'],
                    'snapshots': p.metadata['snapshots']
                }

        return result

    @private
    @accepts(str)
    @returns(str)
    def get_state(self, datastore_id):
        """Ask the owning driver for the datastore's current state."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(f'vm.datastore.{driver}.get_state',
                                         datastore_id)

    @description('Lists disks or files or block devices')
    @accepts(h.ref('VmDatastorePathType'), h.one_of(str, None), str)
    @returns(h.array(h.ref('VmDatastoreItem')))
    def list(self, type, datastore_id=None, root_path='/'):
        """List DISK entries globally, or BLOCK/FILE entries of one datastore.

        :raises RpcException: EINVAL when datastore_id is missing for
            BLOCK/FILE path types.
        """
        if type == 'DISK':
            available_disks_paths = self.dispatcher.call_sync(
                'volume.get_available_disks')
            available_disks = self.dispatcher.call_sync(
                'disk.query', [('path', 'in', available_disks_paths)],
                {'select': ('id', 'path', 'status.description', 'mediasize')})
            # NOTE(review): given the select order, 'path' below is filled
            # from the disk *id* field — confirm this is intentional.
            return [{
                'path': i,
                'size': s,
                'description': '{} {}'.format(p, d),
                'type': type
            } for i, p, d, s in available_disks]

        if not datastore_id:
            raise RpcException(
                errno.EINVAL,
                'Datastore ID has to be specified for BLOCK and FILE path types'
            )

        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.list'.format(driver), type, datastore_id,
            normpath(root_path))

    @private
    @accepts(str)
    @returns(str)
    @description('Returns type of a datastore driver')
    def get_driver(self, id):
        """Return the driver type of datastore *id*; ENOENT if unknown."""
        type = self.query([('id', '=', id)], {
            'single': True,
            'select': 'type'
        })
        if not type:
            raise RpcException(errno.ENOENT,
                               'Datastore {0} not found'.format(id))

        return type

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence under a selected VM datastore'
                 )
    def directory_exists(self, datastore_id, datastore_path):
        """Delegate the directory-existence check to the owning driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.directory_exists'.format(driver), datastore_id,
            normpath(datastore_path))

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Delegate the path translation to the owning driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_filesystem_path'.format(driver),
            datastore_id, normpath(datastore_path))

    @private
    @accepts(str)
    @returns(h.array(str))
    @description(
        'Returns list of resources which have to be locked to safely perform VM datastore operations'
    )
    def get_resources(self, datastore_id):
        """Delegate the resource-list lookup to the owning driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_resources'.format(driver), datastore_id)

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        """Delegate the snapshot listing to the owning driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_snapshots'.format(driver), datastore_id,
            normpath(path))

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        """Delegate the snapshot-existence check to the owning driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.snapshot_exists'.format(driver), datastore_id,
            normpath(path))

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        """Delegate the clone-source lookup to the owning driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_clone_source'.format(driver), datastore_id,
            normpath(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        """Delegate the snapshot-clone listing to the owning driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_snapshot_clones'.format(driver),
            datastore_id, normpath(path))

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Delegate the path-type lookup to the owning driver."""
        driver = self.get_driver(id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_path_type'.format(driver), id,
            normpath(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Delegate the directory listing to the owning driver."""
        driver = self.get_driver(id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.list_dirs'.format(driver), id, normpath(path))
Example #14
0
    def run(self, name):
        """Destroy the named ZFS dataset, mapping ZFS errors to TaskException."""
        try:
            libzfs.ZFS().destroy(name)
        except libzfs.ZFSException as err:
            raise TaskException(errno.EFAULT, str(err))


@accepts(
    str,
    h.any_of(
        h.ref('zfs-topology'),
        None
    ),
    h.any_of(
        h.array(h.ref('zfs-vdev-extension')),
        None
    )
)
class ZpoolExtendTask(ZpoolBaseTask):
    def __init__(self, dispatcher, datastore):
        """Initialize the extend task with empty progress state."""
        super(ZpoolExtendTask, self).__init__(dispatcher, datastore)
        # Name of the pool being extended; assigned in run().
        self.pool = None
        # NOTE(review): 'started' is never set within the visible chunk —
        # confirm where this flag is flipped.
        self.started = False

    def run(self, pool, new_vdevs, updated_vdevs):
        try:
            self.pool = pool
            zfs = libzfs.ZFS()
            pool = zfs.get(pool)
Example #15
0
class UpdateProvider(Provider):
    """RPC provider exposing the cached state of the system updater."""

    @accepts()
    @returns(str)
    def is_update_available(self):
        """Return the cached 'available' flag.

        May return None when the cache slot is valid but empty; raises
        EBUSY while an update check has the slot invalidated.
        """
        temp_available = update_cache.get('available', timeout=1)
        if temp_available is not None:
            return temp_available
        elif update_cache.is_valid('available'):
            return temp_available
        else:
            raise RpcException(
                errno.EBUSY,
                ('Update Availability flag is invalidated, an Update Check'
                 ' might be underway. Try again in some time.'))

    @accepts()
    @returns(h.array(str))
    def obtain_changelog(self):
        """Return the cached changelog, or raise EBUSY if invalidated."""
        temp_changelog = update_cache.get('changelog', timeout=1)
        if temp_changelog is not None:
            return temp_changelog
        elif update_cache.is_valid('changelog'):
            return temp_changelog
        else:
            raise RpcException(
                errno.EBUSY, ('Changelog list is invalidated, an Update Check '
                              'might be underway. Try again in some time.'))

    @accepts()
    @returns(h.array(h.ref('update-ops')))
    def get_update_ops(self):
        """Return cached update operations, or raise EBUSY if invalidated."""
        temp_operations = update_cache.get('operations', timeout=1)
        if temp_operations is not None:
            return temp_operations
        elif update_cache.is_valid('operations'):
            return temp_operations
        else:
            raise RpcException(
                errno.EBUSY,
                ('Update Operations Dict is invalidated, an Update Check '
                 'might be underway. Try again in some time.'))

    @accepts()
    @returns(h.ref('update-info'))
    def update_info(self):
        """Return a dict of all cached update-info fields."""
        if not update_cache.is_valid('available'):
            raise RpcException(
                errno.EBUSY,
                ('Update Availability flag is invalidated, an Update Check'
                 ' might be underway. Try again in some time.'))
        info_item_list = [
            'available', 'changelog', 'notes', 'notice', 'operations',
            'downloaded', 'version', 'installed', 'installed_version'
        ]
        return {
            key: update_cache.get(key, timeout=1)
            for key in info_item_list
        }

    @returns(h.any_of(
        h.array(h.ref('update-train')),
        None,
    ))
    def trains(self):
        """Return the available update trains, or None when unreachable."""
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()
        trains = conf.AvailableTrains()

        if trains is None:
            logger.debug(
                'The AvailableTrains call returned None. Check your network connection'
            )
            return None
        seltrain = self.dispatcher.configstore.get('update.train')

        data = []
        for name in list(trains.keys()):
            # Reuse the already-parsed train object when available.
            if name in conf._trains:
                train = conf._trains.get(name)
            else:
                train = Train.Train(name)
            data.append({
                'name': train.Name(),
                'description': train.Description(),
                'sequence': train.LastSequence(),
                'current': True if name == seltrain else False,
            })
        return data

    @accepts()
    @returns(str)
    def get_current_train(self):
        """Return the name of the currently selected update train."""
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()
        return conf.CurrentTrain()

    @accepts()
    @returns(h.ref('update'))
    def get_config(self):
        """Return persisted update settings plus the update server URL."""
        return {
            'train': self.dispatcher.configstore.get('update.train'),
            'check_auto': self.dispatcher.configstore.get('update.check_auto'),
            'update_server': Configuration.Configuration().UpdateServerURL(),
        }

    @private
    @accepts(h.array(str))
    def update_cache_invalidate(self, value_list):
        """Invalidate the named update-cache entries."""
        for item in value_list:
            update_cache.invalidate(item)

    @private
    @accepts(h.object())
    def update_cache_putter(self, value_dict):
        """Store key/value pairs in the cache, then emit an update event."""
        for key, value in value_dict.items():
            update_cache.put(key, value)
        self.dispatcher.dispatch_event('update.update_info.updated',
                                       {'operation': 'update'})

    @private
    @accepts(str)
    @returns(h.any_of(None, str, bool, h.array(str)))
    def update_cache_getter(self, key):
        """Fetch one update-cache entry (1 second timeout)."""
        return update_cache.get(key, timeout=1)

    @private
    @accepts(str, str, h.any_of(None, h.object(additionalProperties=True)))
    def update_alert_set(self, update_class, update_version, kwargs=None):
        """Emit the alert for *update_class*/*update_version*, replacing stale ones.

        :param update_class: 'UpdateAvailable', 'UpdateDownloaded' or
            'UpdateInstalled'.
        :param update_version: version string the alert refers to.
        :param kwargs: optional overrides ('desc', 'update_installed_bootenv').
        :raises RpcException: EINVAL for an unknown update_class.
        """
        # Formulating a query to find any alerts in the current `update_class`
        # which could be either of ('UpdateAvailable', 'UpdateDownloaded', 'UpdateInstalled')
        # as well as any alerts for the specified update version string.
        # The reason I do this is because say an Update is Downloaded (FreeNAS-10-2016051047)
        # and there is either a previous alert for an older downloaded update OR there is a
        # previous alert for the same version itself but for it being available instead of being
        # downloaded already, both of these previous alerts would need to be cancelled and
        # replaced by 'UpdateDownloaded' for FreeNAS-10-2016051047.
        if kwargs is None:
            kwargs = {}
        existing_update_alerts = self.dispatcher.call_sync(
            'alert.query', [('and', [('active', '=', True),
                                     ('dismissed', '=', False)]),
                            ('or', [('class', '=', update_class),
                                    ('target', '=', update_version)])])
        title = UPDATE_ALERT_TITLE_MAP.get(update_class, 'Update Alert')
        desc = kwargs.get('desc')
        if desc is None:
            if update_class == 'UpdateAvailable':
                desc = 'Latest Update: {0} is available for download'.format(
                    update_version)
            elif update_class == 'UpdateDownloaded':
                desc = 'Update containing {0} is downloaded and ready for install'.format(
                    update_version)
            elif update_class == 'UpdateInstalled':
                update_installed_bootenv = kwargs.get(
                    'update_installed_bootenv')
                if update_installed_bootenv and not update_installed_bootenv[
                        0]['on_reboot']:
                    desc = 'Update containing {0} is installed.'.format(
                        update_version)
                    desc += ' Please activate {0} and Reboot to use this updated version'.format(
                        update_installed_bootenv[0]['realname'])
                else:
                    desc = 'Update containing {0} is installed and activated for next boot'.format(
                        update_version)
            else:
                # what state is this?
                raise RpcException(
                    errno.EINVAL,
                    'Unknown update alert class: {0}'.format(update_class))
        alert_payload = {
            'class': update_class,
            'title': title,
            'target': update_version,
            'description': desc
        }

        alert_exists = False
        # Purposely deleting stale alerts later on since if anything (in constructing the payload)
        # above this fails the exception prevents alert.cancel from being called.
        for update_alert in existing_update_alerts:
            if (update_alert['class'] == update_class
                    and update_alert["target"] == update_version
                    and update_alert["description"] == desc):
                alert_exists = True
                continue
            self.dispatcher.call_sync('alert.cancel', update_alert['id'])

        if not alert_exists:
            self.dispatcher.call_sync('alert.emit', alert_payload)
Example #16
0
class LocalDatastoreProvider(Provider):
    """VM datastore driver backed by a directory on the local filesystem."""

    @private
    @generator
    def discover(self):
        # Local datastores are configured explicitly and never auto-discovered.
        return

    @private
    @description('Lists files or block devices')
    @accepts(h.ref('VmDatastorePathType'), str, str)
    @returns(h.array(h.ref('VmDatastoreItem')))
    def list(self, type, datastore_id, root_path):
        """List entries below root_path; non-directories only when type == 'FILE'."""
        local_root = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', datastore_id, root_path)
        if not os.path.isdir(local_root):
            raise RpcException(errno.EINVAL, f'Selected path {root_path} is not a directory')

        items = []
        for entry in os.listdir(local_root):
            entry_path = os.path.join(local_root, entry)
            entry_is_dir = os.path.isdir(entry_path)
            if not (entry_is_dir or type == 'FILE'):
                # Non-directories are reported only when files were requested.
                continue

            items.append({
                'path': os.path.join('/', root_path, entry),
                'type': 'DIRECTORY' if entry_is_dir else 'FILE',
                'size': os.stat(entry_path).st_size,
                'description': ''
            })

        return items

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Resolve datastore_path against the datastore's configured root."""
        record = self.datastore.get_by_id('vm.datastores', datastore_id)
        if record['type'] != 'local':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')

        return os.path.join(q.get(record, 'properties.path'), datastore_path)

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence in local VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        """Return True when datastore_path exists under the datastore root."""
        record = self.datastore.get_by_id('vm.datastores', datastore_id)
        if record['type'] != 'local':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')

        full_path = os.path.join(q.get(record, 'properties.path'), datastore_path)
        return os.path.exists(full_path)

    @private
    @accepts(str)
    @returns(h.array(str))
    @description('Returns list of resources which have to be locked to safely perform VM datastore operations')
    def get_resources(self, datastore_id):
        # Local operations only contend with the generic 'system' resource.
        return ['system']

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        # Plain directories cannot hold snapshots; nothing to yield.
        return

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        # Snapshots are not supported on local datastores.
        return False

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        # Clones are not supported on local datastores.
        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        # Clones are not supported on local datastores.
        return []

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Classify path as DIRECTORY or FILE, raising ENOENT when absent."""
        fs_path = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', id, path)
        if not os.path.exists(fs_path):
            raise RpcException(errno.ENOENT, 'Path {0} does not exist'.format(fs_path))

        return 'DIRECTORY' if os.path.isdir(fs_path) else 'FILE'

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Recursively list every directory path under the given datastore path."""
        fs_path = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', id, path)
        return [dirpath for dirpath, _dirnames, _filenames in os.walk(fs_path)]
Beispiel #17
0
class DeviceInfoProvider(Provider):
    """Read-only inventory of local hardware devices, grouped by class."""

    @description("Returns list of available device classes")
    @returns(h.array(str))
    def get_classes(self):
        # NOTE(review): a '_get_class_multipath' helper exists below but
        # 'multipath' is not advertised here; it is still reachable via
        # get_devices('multipath') - confirm whether that is intended.
        return ["disk", "network", "cpu", "usb", "serial_port"]

    @description("Returns list of devices from given class")
    @accepts(str)
    @returns(
        h.any_of(h.ref('DiskDevice'), h.ref('NetworkDevice'),
                 h.ref('CpuDevice'), h.ref('SerialPortDevice'),
                 h.ref('UsbDevice')))
    def get_devices(self, dev_class):
        """Dispatch to the matching _get_class_<dev_class> helper; None if unknown."""
        method = "_get_class_{0}".format(dev_class)
        if hasattr(self, method):
            return getattr(self, method)()

        return None

    def _get_class_disk(self):
        """Enumerate physical disks via GEOM's DISK class."""
        result = []
        geom.scan()
        for child in geom.class_by_name('DISK').geoms:
            result.append({
                "path": os.path.join("/dev", child.name),
                "name": child.name,
                "mediasize": child.provider.mediasize,
                "description": child.provider.config['descr']
            })

        return result

    def _get_class_multipath(self):
        """Enumerate GEOM multipath devices; empty list when the class is absent."""
        result = []
        geom.scan()
        cls = geom.class_by_name('MULTIPATH')
        if not cls:
            return []

        for child in cls.geoms:
            result.append({
                "path": os.path.join("/dev", child.name),
                "name": child.name,
                "mediasize": child.provider.mediasize,
                "members": [c.provider.name for c in child.consumers]
            })

        return result

    def _get_class_network(self):
        """Enumerate network interfaces, excluding cloned ones."""
        result = []
        for i in list(netif.list_interfaces().keys()):
            if i.startswith(tuple(netif.CLONED_PREFIXES)):
                continue

            try:
                # e.g. 'em0' -> sysctl 'dev.em.0.%desc' for the description.
                desc = get_sysctl(
                    re.sub('(\\w+)([0-9]+)', 'dev.\\1.\\2.%desc', i))
                result.append({'name': i, 'description': desc})
            except FileNotFoundError:
                # Interface without a matching sysctl node; skip it.
                continue

        return result

    def _get_class_serial_port(self):
        """Enumerate uart devices from the I/O port resource manager."""
        result = []
        for devices in devinfo.DevInfo().resource_managers['I/O ports'].values(
        ):
            for dev in devices:
                if not dev.name.startswith('uart'):
                    continue
                result.append({
                    'name': dev.name,
                    'description': dev.desc,
                    'drivername': dev.drivername,
                    'location': dev.location,
                    'start': hex(dev.start),
                    'size': dev.size
                })

        return result

    def _get_class_cpu(self):
        """One entry per logical CPU; freq/temperature are None when unavailable."""
        result = []
        ncpus = get_sysctl('hw.ncpu')
        model = get_sysctl('hw.model').strip('\x00')
        for i in range(0, ncpus):
            freq = None
            temp = None

            with contextlib.suppress(OSError):
                # BUGFIX: a stray trailing comma previously turned freq into
                # a one-element tuple instead of the plain sysctl value.
                freq = get_sysctl('dev.cpu.{0}.freq'.format(i))

            with contextlib.suppress(OSError):
                temp = get_sysctl('dev.cpu.{0}.temperature'.format(i))

            result.append({'name': model, 'freq': freq, 'temperature': temp})

        return result

    def _get_class_usb(self):
        """Enumerate USB devices via libusb; the context is always released."""
        result = []
        context = usb1.USBContext()

        try:
            for device in context.getDeviceList():
                result.append({
                    'bus': device.getBusNumber(),
                    'address': device.getDeviceAddress(),
                    'manufacturer': device.getManufacturer(),
                    'product': device.getProduct(),
                    'vid': device.getVendorID(),
                    'pid': device.getProductID(),
                    'class': device.getDeviceClass()
                })
        finally:
            # BUGFIX: release libusb resources even when enumeration raises.
            context.exit()

        return result
Beispiel #18
0
class SharesProvider(Provider):
    """Query and path-resolution helpers for configured shares."""

    @query('Share')
    @generator
    def query(self, filter=None, params=None):
        """Stream shares annotated with resolved filesystem path and permissions."""
        def annotate(entry):
            resolved_path = None
            stat_info = None

            try:
                resolved_path = self.translate_path(entry['id'])
                if entry['target_type'] in ('DIRECTORY', 'DATASET', 'FILE'):
                    stat_info = self.dispatcher.call_sync('filesystem.stat', resolved_path)
            except RpcException:
                # Shares whose target cannot be resolved are still returned,
                # just without path/permission details.
                pass

            entry['filesystem_path'] = resolved_path
            entry['permissions'] = stat_info['permissions'] if stat_info else None
            return entry

        stream = self.datastore.query_stream('shares', callback=annotate)
        return q.query(stream, *(filter or []), stream=True, **(params or {}))

    @description("Returns list of supported sharing providers")
    @accepts()
    @returns(h.ref('ShareTypes'))
    def supported_types(self):
        """Map sharing method name -> {subtype, perm_type} for loaded plugins."""
        types = {}
        for plugin in list(self.dispatcher.plugins.values()):
            meta = plugin.metadata
            if not meta or meta.get('type') != 'sharing':
                continue

            types[meta['method']] = {
                'subtype': meta['subtype'],
                'perm_type': meta.get('perm_type')
            }

        return types

    @description("Returns list of clients connected to particular share")
    @accepts(str)
    @returns(h.array(h.ref('ShareClient')))
    def get_connected_clients(self, id):
        """Delegate to the share-type plugin after validating the share exists."""
        record = self.datastore.get_by_id('shares', id)
        if not record:
            raise RpcException(errno.ENOENT, 'Share not found')

        return self.dispatcher.call_sync('share.{0}.get_connected_clients'.format(record['type']), id)

    @description("Get shares dependent on provided filesystem path")
    @accepts(str, bool, bool)
    @returns(h.array(h.ref('Share')))
    def get_dependencies(self, path, enabled_only=True, recursive=True):
        """Return shares whose target lies on (recursive) or exactly at path."""
        filters = [('enabled', '=', True)] if enabled_only else []
        dependent = []
        for record in self.datastore.query_stream('shares', *filters):
            target_path = self.translate_path(record['id'])
            hit = in_directory(target_path, path) if recursive else target_path == path
            if hit:
                dependent.append(record)

        return dependent

    @private
    def translate_path(self, share_id):
        """Resolve a share id to its target's filesystem path."""
        record = self.datastore.get_by_id('shares', share_id)
        if not record:
            raise RpcException(errno.ENOENT, 'Share {0} not found'.format(share_id))

        return self.dispatcher.call_sync('share.expand_path', record['target_path'], record['target_type'])

    @private
    def expand_path(self, path, type):
        """Turn a share target (dataset/zvol/directory/file) into a filesystem path."""
        root = self.dispatcher.call_sync('volume.get_volumes_root')
        if type in ('DIRECTORY', 'FILE'):
            return path

        if type == 'DATASET':
            return os.path.join(root, path)

        if type == 'ZVOL':
            return os.path.join('/dev/zvol', path)

        raise RpcException(errno.EINVAL, 'Invalid share target type {0}'.format(type))

    @private
    def get_directory_path(self, share_id):
        """Return the directory that contains the share's target."""
        record = self.datastore.get_by_id('shares', share_id)
        return self.dispatcher.call_sync('share.get_dir_by_path', record['target_path'], record['target_type'])

    @private
    def get_dir_by_path(self, path, type):
        """Resolve the directory that holds the given share target."""
        root = self.dispatcher.call_sync('volume.get_volumes_root')
        if type == 'DIRECTORY':
            return path

        if type == 'FILE':
            return os.path.dirname(path)

        if type == 'DATASET':
            return os.path.join(root, path)

        if type == 'ZVOL':
            return os.path.dirname(os.path.join(root, path))

        raise RpcException(errno.EINVAL, 'Invalid share target type {0}'.format(type))
class DatastoreProvider(Provider):
    """Driver-agnostic VM datastore API; each call fans out to the driver plugin."""

    @query('VmDatastore')
    @generator
    def query(self, filter=None, params=None):
        """Stream discovered and persisted datastores annotated with capabilities."""
        capabilities = self.supported_drivers()

        def annotate(entry):
            entry['capabilities'] = capabilities[entry['type']]
            return entry

        def produce():
            # Driver-side discovery runs first; a misbehaving driver must not
            # break the whole listing, hence the blanket suppression.
            for driver_name in capabilities:
                with contextlib.suppress(BaseException):
                    for found in self.dispatcher.call_sync(f'vm.datastore.{driver_name}.discover'):
                        yield annotate(found)

            # Persisted datastores from the configuration database follow.
            yield from self.datastore.query_stream('vm.datastores', callback=annotate)

        return q.query(produce(), *(filter or []), **(params or {}))

    @description("Returns list of supported datastore drivers")
    def supported_drivers(self):
        """Map driver name -> capability flags for all loaded datastore plugins."""
        drivers = {}
        for plugin in list(self.dispatcher.plugins.values()):
            meta = plugin.metadata
            if not meta or meta.get('type') != 'datastore':
                continue

            drivers[meta['driver']] = {
                'clones': meta['clones'],
                'snapshots': meta['snapshots']
            }

        return drivers

    @private
    @accepts(str)
    @returns(str)
    @description('Returns type of a datastore driver')
    def get_driver(self, id):
        """Return the driver type of the datastore, raising ENOENT when unknown."""
        entry = self.query([('id', '=', id)], {'single': True})
        if not entry:
            raise RpcException(errno.ENOENT, 'Datastore {0} not found'.format(id))

        return entry['type']

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence under a selected VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        driver_type = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver_type}.directory_exists',
            datastore_id,
            normpath(datastore_path)
        )

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        driver_type = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver_type}.get_filesystem_path',
            datastore_id,
            normpath(datastore_path)
        )

    @private
    @accepts(str)
    @returns(h.array(str))
    @description('Returns list of resources which have to be locked to safely perform VM datastore operations')
    def get_resources(self, datastore_id):
        driver_type = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(f'vm.datastore.{driver_type}.get_resources', datastore_id)

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        driver_type = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver_type}.get_snapshots',
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        driver_type = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver_type}.snapshot_exists',
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        driver_type = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver_type}.get_clone_source',
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        driver_type = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver_type}.get_snapshot_clones',
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        driver_type = self.get_driver(id)
        return self.dispatcher.call_sync(f'vm.datastore.{driver_type}.get_path_type', id, normpath(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        driver_type = self.get_driver(id)
        return self.dispatcher.call_sync(f'vm.datastore.{driver_type}.list_dirs', id, normpath(path))
class NFSDatastoreProvider(Provider):
    """VM datastore driver for NFS shares mounted under /nfs/<name>."""

    @private
    @generator
    def discover(self):
        # NFS datastores are configured explicitly and never auto-discovered.
        return

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts remote NFS VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Map datastore_path onto the datastore's /nfs mountpoint."""
        record = self.datastore.get_by_id('vm.datastores', datastore_id)
        if record['type'] != 'nfs':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')

        return os.path.join('/nfs', record['name'], datastore_path)

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence in NFS VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        """Return True when datastore_path exists under the NFS mountpoint."""
        record = self.datastore.get_by_id('vm.datastores', datastore_id)
        if record['type'] != 'nfs':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')

        mount_path = os.path.join('/nfs', record['name'], datastore_path)
        return os.path.exists(mount_path)

    @private
    @accepts(str)
    @returns(h.array(str))
    @description('Returns list of resources which have to be locked to safely perform VM datastore operations')
    def get_resources(self, datastore_id):
        # NFS operations only contend with the generic 'system' resource.
        return ['system']

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        # Snapshots are not supported on NFS datastores; nothing to yield.
        return

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        # Snapshots are not supported on NFS datastores.
        return False

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        # Clones are not supported on NFS datastores.
        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        # Clones are not supported on NFS datastores.
        return []

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        # A mounted share behaves like a local directory tree, so the local
        # driver's classification logic is reused.
        return self.dispatcher.call_sync('vm.datastore.local.get_path_type', id, path)

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        # Delegated to the local driver for the same reason as get_path_type.
        return self.dispatcher.call_sync('vm.datastore.local.list_dirs', id, path)
class LocalDatastoreProvider(Provider):
    """VM datastore driver backed by a directory on the local filesystem."""

    @private
    @generator
    def discover(self):
        # Local datastores are configured explicitly; nothing to discover.
        return

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Resolve datastore_path against the datastore's configured root."""
        ds = self.datastore.get_by_id('vm.datastores', datastore_id)
        if ds['type'] != 'local':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')

        return os.path.join(q.get(ds, 'properties.path'), datastore_path)

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence in local VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        """Return True when datastore_path exists under the datastore root."""
        ds = self.datastore.get_by_id('vm.datastores', datastore_id)
        if ds['type'] != 'local':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')

        return os.path.exists(
            os.path.join(q.get(ds, 'properties.path'), datastore_path))

    @private
    @accepts(str)
    @returns(h.array(str))
    @description(
        'Returns list of resources which have to be locked to safely perform VM datastore operations'
    )
    def get_resources(self, datastore_id):
        # Local operations only contend with the generic 'system' resource.
        return ['system']

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        # Plain directories cannot hold snapshots; nothing to yield.
        return

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        # Snapshots are not supported on local datastores.
        return False

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        # Clones are not supported on local datastores.
        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        # Clones are not supported on local datastores.
        return []

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Classify the given datastore path; raises ENOENT when it is absent."""
        path = self.dispatcher.call_sync('vm.datastore.get_filesystem_path',
                                         id, path)
        if not os.path.exists(path):
            raise RpcException(errno.ENOENT,
                               'Path {0} does not exist'.format(path))

        if os.path.isdir(path):
            return 'DIRECTORY'

        # NOTE(review): non-directories are reported as BLOCK_DEVICE here,
        # while another variant of this driver in this file returns 'FILE' -
        # confirm which value callers expect.
        return 'BLOCK_DEVICE'

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Recursively list all directory paths below the datastore path."""
        path = self.dispatcher.call_sync('vm.datastore.get_filesystem_path',
                                         id, path)
        return [p[0] for p in os.walk(path)]
Beispiel #22
0
import errno
import logging
import os
import shutil
import ipfsapi
from requests.exceptions import ConnectionError
from datastore.config import ConfigNode
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns, private
from task import Task, Provider, TaskException, ValidationException, TaskDescription

# Module-level logger for the IPFS plugin.
logger = logging.getLogger('IPFSPlugin')

# Declarative table of ipfsapi client methods that are exposed as tasks.
# For each method name:
#   early_describe - description reported before task arguments are known
#   accepts        - schema of the task's positional arguments
#   args           - names of those arguments, in call order
ipfs_tasks = {
    'add': {
        'early_describe': 'Calling IPFS add',
        'accepts': (h.one_of(str, h.array(str)), bool),
        'args': ('files', 'recursive')
    },
    'get': {
        'early_describe': 'Calling IPFS get',
        'accepts': (str, h.one_of(str, None)),
        'args': ('multihash', 'filepath')
    },
    'add_json': {
        'early_describe': 'Calling IPFS add json',
        'accepts': (h.object(),),
        'args': ('json_obj',)
    }

}
Beispiel #23
0
class FilesystemProvider(Provider):
    """Filesystem RPCs: directory listing, stat, token-based file transfer."""

    @description("Lists contents of given directory")
    @accepts(str)
    @returns(h.array(h.ref('Directory')))
    def list_dir(self, path):
        """Return name, type, size and mtime for each readable entry of path."""
        result = []
        if not os.path.isdir(path):
            # NOTE(review): ENOENT is kept for backward compatibility even
            # though ENOTDIR would describe this condition more precisely.
            raise RpcException(errno.ENOENT,
                               'Path {0} is not a directory'.format(path))

        for i in os.listdir(path):
            try:
                st = os.stat(os.path.join(path, i))
            except OSError:
                # Entry vanished or is unreadable; skip it.
                continue

            item = {
                'name': i,
                'type': get_type(st),
                'size': st.st_size,
                'modified': st.st_mtime
            }

            result.append(item)

        return result

    @accepts(str)
    @returns(h.ref('Stat'))
    def stat(self, path):
        """Stat path (symlinks not followed) returning ownership, ACL and mode bits."""
        try:
            st = os.stat(path, follow_symlinks=False)
            a = acl.ACL(file=path)
        except OSError as err:
            raise RpcException(err.errno, str(err))

        try:
            user = self.dispatcher.call_sync('dscached.account.getpwuid',
                                             st.st_uid)
            domain = q.get(user, 'origin.domain')
            # BUGFIX: a falsy domain previously rendered the literal text
            # 'None' into the name (e.g. 'bobNoneNone').
            username = f'{user["username"]}@{domain}' if domain else user['username']
        except RpcException:
            username = None

        try:
            group = self.dispatcher.call_sync('dscached.group.getgrgid',
                                              st.st_gid)
            domain = q.get(group, 'origin.domain')
            groupname = f'{group["name"]}@{domain}' if domain else group['name']
        except RpcException:
            groupname = None

        return {
            'path': path,
            'type': get_type(st),
            'atime': datetime.utcfromtimestamp(st.st_atime),
            'mtime': datetime.utcfromtimestamp(st.st_mtime),
            'ctime': datetime.utcfromtimestamp(st.st_ctime),
            'uid': st.st_uid,
            'user': username,
            'gid': st.st_gid,
            'group': groupname,
            'permissions': {
                'acl': self.dispatcher.threaded(a.__getstate__),
                'user': username,
                'group': groupname,
                'modes': {
                    'value': st.st_mode & 0o777,
                    'user': {
                        'read': bool(st.st_mode & stat.S_IRUSR),
                        'write': bool(st.st_mode & stat.S_IWUSR),
                        'execute': bool(st.st_mode & stat.S_IXUSR)
                    },
                    'group': {
                        'read': bool(st.st_mode & stat.S_IRGRP),
                        'write': bool(st.st_mode & stat.S_IWGRP),
                        'execute': bool(st.st_mode & stat.S_IXGRP)
                    },
                    'others': {
                        'read': bool(st.st_mode & stat.S_IROTH),
                        'write': bool(st.st_mode & stat.S_IWOTH),
                        'execute': bool(st.st_mode & stat.S_IXOTH)
                    },
                }
            }
        }

    @pass_sender
    @accepts(str)
    @returns(str)
    def download(self, path, sender):
        """Open path for reading and return a 60-second download token."""
        try:
            f = open(path, 'rb')
        except OSError as e:
            # Stringified for consistency with stat()'s error reporting.
            raise RpcException(e.errno, str(e))

        token = self.dispatcher.token_store.issue_token(
            FileToken(user=sender.user,
                      lifetime=60,
                      direction='download',
                      file=f,
                      name=os.path.basename(f.name),
                      size=os.path.getsize(path)))

        return token

    @pass_sender
    @accepts(str, h.one_of(int, None), str)
    @returns(str)
    def upload(self, dest_path, size, mode, sender):
        """Open dest_path for writing and return a 60-second upload token.

        NOTE(review): 'mode' is part of the accepted signature but is not
        applied here - confirm whether permissions should be set on dest_path.
        """
        try:
            f = open(dest_path, 'wb')
        except OSError as e:
            raise RpcException(e.errno, str(e))

        token = self.dispatcher.token_store.issue_token(
            FileToken(user=sender.user,
                      lifetime=60,
                      direction='upload',
                      file=f,
                      name=os.path.basename(dest_path),
                      size=size))

        return token

    @accepts(str)
    @returns(h.array(h.ref('OpenFile')))
    @generator
    def get_open_files(self, path):
        """Yield pid/command/path for every open file whose path starts with 'path'."""
        for proc in self.dispatcher.threaded(bsd.getprocs,
                                             bsd.ProcessLookupPredicate.PROC):
            for f in self.dispatcher.threaded(lambda: list(proc.files)):
                if not f.path:
                    continue

                if f.path.startswith(path):
                    yield {
                        'pid': proc.pid,
                        'process_name': proc.command,
                        'path': f.path
                    }
Beispiel #24
0
class VolumeDatastoreProvider(Provider):
    """VM datastore driver backed by ZFS volumes; ZVOLs serve as block
    devices, while file handling is delegated to the local driver."""

    @private
    @generator
    def discover(self):
        # Every existing ZFS volume is exposed as a 'volume' datastore.
        for vol in self.dispatcher.call_sync('volume.query', [],
                                             {'select': 'id'}):
            yield {'id': vol, 'name': vol, 'type': 'volume'}

    @private
    @description('Lists files or ZVOLs')
    @accepts(h.ref('VmDatastorePathType'), str, str)
    @returns(h.array(h.ref('VmDatastoreItem')))
    def list(self, type, datastore_id, root_path):
        """List direct ZVOL children (for BLOCK requests) plus entries from
        the local driver under root_path."""
        result = []
        if type == 'BLOCK':
            if root_path:
                dataset = os.path.join(datastore_id, root_path)
            else:
                dataset = datastore_id

            # Regex matches only direct children of 'dataset'.
            # NOTE(review): 'dataset' is interpolated without re.escape();
            # names containing regex metacharacters would misbehave -
            # confirm allowed dataset name characters.
            zvols = self.dispatcher.call_sync(
                'volume.dataset.query', [('id', '~', f'^{dataset}/((?!/).)*$'),
                                         ('type', '=', 'VOLUME')],
                {'select': ('id', 'volsize')})
            for zvol, size in zvols:
                result.append({
                    'path': '/' + '/'.join(zvol.split('/')[1:]),
                    'type': 'BLOCK',
                    'size': size,
                    'description': ''
                })

        result.extend(
            self.dispatcher.call_sync('vm.datastore.local.list', type,
                                      datastore_id, root_path))
        return result

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts dataset path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        # Resolution is handled by the volume plugin.
        return self.dispatcher.call_sync('volume.resolve_path', datastore_id,
                                         datastore_path)

    @private
    @accepts(str)
    @returns(h.array(str))
    @description(
        'Returns list of resources which have to be locked to safely perform VM datastore operations'
    )
    def get_resources(self, datastore_id):
        # Operations on a volume datastore must lock its backing zpool.
        return ['zpool:{0}'.format(datastore_id)]

    @private
    @accepts(str, str)
    @returns(bool)
    @description(
        'Checks for existence of dataset representing a VM datastore\'s directory'
    )
    def directory_exists(self, datastore_id, datastore_path):
        # A 'directory' on this driver is represented by a child dataset.
        path = os.path.join(datastore_id, datastore_path)
        return self.dispatcher.call_sync('volume.dataset.query',
                                         [('id', '=', path)],
                                         {'single': True}) is not None

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        # VM snapshots are ZFS snapshots tagged with the
        # 'org.freenas:vm_snapshot' metadata property.
        dataset = os.path.join(datastore_id, path)
        snapshots = self.dispatcher.call_sync(
            'volume.snapshot.query',
            [('dataset', '=', dataset),
             ('metadata', 'contains', 'org.freenas:vm_snapshot')],
            {'select': 'metadata.org\\.freenas:vm_snapshot'})
        for snap_id in snapshots:
            yield '{0}@{1}'.format(path, snap_id)

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        """Return True when a tagged VM snapshot '<dataset>@<id>' exists."""
        raw_dataset, snap_id = path.split('@', 1)
        dataset = os.path.join(datastore_id, raw_dataset)
        return bool(
            self.dispatcher.call_sync(
                'volume.snapshot.query',
                [('dataset', '=', dataset),
                 ('metadata.org\\.freenas:vm_snapshot', '=', snap_id)],
                {'count': True}))

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        """Return '/<dataset>@<snapshot id>' the dataset was cloned from, or None."""
        dataset = os.path.join(datastore_id, path)
        origin = self.dispatcher.call_sync(
            'volume.dataset.query', [('id', '=', dataset)], {
                'select': 'properties.origin.parsed',
                'single': True
            })
        if origin:
            dataset, snap_id = self.dispatcher.call_sync(
                'volume.snapshot.query', [('id', '=', origin)], {
                    'select':
                    ('dataset', 'metadata.org\\.freenas:vm_snapshot'),
                    'single': True
                })
            if snap_id:
                # Strip the pool component so the result is datastore-relative.
                dataset = '/'.join(dataset.split('/')[1:])
                return f'/{dataset}@{snap_id}'

        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        """Return datastore-relative ids of datasets cloned from the snapshot."""
        raw_dataset, snap_id = path.split('@', 1)
        dataset = os.path.join(datastore_id, raw_dataset)
        snapshot_id = self.dispatcher.call_sync(
            'volume.snapshot.query',
            [('dataset', '=', dataset),
             ('metadata.org\\.freenas:vm_snapshot', '=', snap_id)], {
                 'single': True,
                 'select': 'id'
             })
        datasets = self.dispatcher.call_sync(
            'volume.dataset.query',
            [('properties.origin.parsed', '=', snapshot_id)], {'select': 'id'})
        return ['/' + '/'.join(d.split('/')[1:]) for d in datasets]

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Classify path as BLOCK (ZVOL), DIRECTORY (dataset), SNAPSHOT or FILE."""
        zfs_path = os.path.join(id, path)

        ds_type = self.dispatcher.call_sync('volume.dataset.query',
                                            [('id', '=', zfs_path)], {
                                                'single': True,
                                                'select': 'type'
                                            })
        if ds_type:
            if ds_type == 'VOLUME':
                return 'BLOCK'
            return 'DIRECTORY'

        if self.dispatcher.call_sync('volume.snapshot.query',
                                     [('id', '=', zfs_path)], {'count': True}):
            return 'SNAPSHOT'

        if os.path.exists(
                self.dispatcher.call_sync(
                    'vm.datastore.volume.get_filesystem_path', id, path)):
            return 'FILE'
        else:
            raise RpcException(errno.ENOENT,
                               'Path {0} does not exist'.format(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Return datastore-relative ids of the dataset and all its descendants."""
        # NOTE(review): same unescaped-regex caveat as in list().
        dataset = os.path.join(id, path)
        matching_datasets = self.dispatcher.call_sync(
            'volume.dataset.query', [('id', '~', f'^{dataset}(/.*)?$')],
            {'select': 'id'})
        return ['/' + '/'.join(d.split('/')[1:]) for d in matching_datasets]
Beispiel #25
0
class SessionProvider(Provider):
    """Read access to login sessions plus simple session-to-session messaging."""

    @query('Session')
    @generator
    def query(self, filter=None, params=None):
        """Stream session records matching *filter*/*params*."""
        return self.datastore.query_stream('sessions', *(filter or []),
                                           **(params or {}))

    @accepts()
    @returns(h.array(h.ref('Session')))
    # Fixed: the two description sentences were concatenated without a
    # separator, producing "...user sessionsDoes not include..." at runtime.
    @description("Returns the logged in and active user sessions. "
                 "Does not include the service sessions in this.")
    def get_live_user_sessions(self):
        """Return session records for currently connected human users."""
        # The 'uid' attribute check separates the actual GUI/CLI users of the
        # websocket connection from system services (etcd, statd and so on),
        # whose connection user objects carry no uid.
        live_user_session_ids = [
            conn.session_id
            for srv in self.dispatcher.ws_servers
            for conn in srv.connections
            if hasattr(conn.user, 'uid')
        ]

        return self.datastore.query('sessions',
                                    ('id', 'in', live_user_session_ids))

    @pass_sender
    @returns(int)
    def get_my_session_id(self, sender):
        """Return the session id of the calling connection."""
        return sender.session_id

    @description("Returns the logged in user for the current session")
    @returns(str)
    @pass_sender
    def whoami(self, sender):
        """Return the user name associated with the caller's session."""
        return sender.user.name

    @description("Sends a message to given session")
    @accepts(int, str)
    @pass_sender
    def send_to_session(self, id, message, sender):
        """Deliver *message* to session *id*.

        Raises ENOENT when no connected session has that id.
        """
        target = None
        for srv in self.dispatcher.ws_servers:
            target = first_or_default(lambda s: s.session_id == id,
                                      srv.connections)
            if target:
                break

        if not target:
            raise RpcException(errno.ENOENT,
                               'Session {0} not found'.format(id))

        target.outgoing_events.put(('session.message', {
            'sender_id': sender.session_id,
            'sender_name': sender.user.name if sender.user else None,
            'message': message
        }))

    @description("Sends a message to every active session")
    @accepts(str)
    @pass_sender
    def send_to_all(self, message, sender):
        """Broadcast *message* to every connected session."""
        # The payload is identical for every recipient — build it once
        # instead of once per connection.
        payload = {
            'sender_id': sender.session_id,
            'sender_name': sender.user.name if sender.user else None,
            'message': message
        }
        for srv in self.dispatcher.ws_servers:
            for target in srv.connections:
                target.outgoing_events.put(('session.message', payload))
Beispiel #26
0
                'name': name,
                'type': type,
                'mountpoint': mountpoint,
                'topology': volume['topology'],
                'attributes': volume.get('attributes', {})
            })

        self.set_progress(90)
        self.dispatcher.dispatch_event('volumes.changed', {
            'operation': 'create',
            'ids': [id]
        })


@description("Creates new volume and automatically guesses disks layout")
@accepts(str, str, h.array(str), h.object())
class VolumeAutoCreateTask(Task):
    def verify(self, name, type, disks, params=None):
        """Reject duplicate volume names and return the disk resource locks
        this task needs (one 'disk:/dev/<name>' entry per disk)."""
        name_taken = self.datastore.exists('volumes', ('name', '=', name))
        if name_taken:
            raise VerifyException(errno.EEXIST,
                                  'Volume with same name already exists')

        resources = []
        for disk in disks:
            resources.append('disk:{0}'.format(os.path.join('/dev', disk)))
        return resources

    def run(self, name, type, disks, params=None):
        vdevs = []
        if len(disks) % 3 == 0:
            for i in range(0, len(disks), 3):
                vdevs.append({
                    'type': 'raidz1',
                    'children': [{'type': 'disk', 'path': os.path.join('/dev', i)} for i in disks[i:i+3]]