Example no. 1
0
class StatProvider(Provider):
    """Read-only RPC facade over the statd statistics daemon."""

    @query('Statistic')
    @generator
    def query(self, filter=None, params=None):
        """Stream current statistics, optionally filtered and paginated."""
        current = self.dispatcher.call_sync('statd.output.get_current_state')
        filter_rules = filter or []
        query_params = params or {}
        return q.query(current, *filter_rules, stream=True, **query_params)

    @returns(h.array(str))
    @generator
    def get_data_sources(self):
        """Return the names of all statd data sources."""
        return self.dispatcher.call_sync('statd.output.get_data_sources')

    def get_data_sources_tree(self):
        """Return the statd data sources arranged as a tree."""
        return self.dispatcher.call_sync('statd.output.get_data_sources_tree')

    @accepts(h.one_of(str, h.array(str)), h.ref('GetStatsParams'))
    @returns(h.ref('GetStatsResult'))
    def get_stats(self, data_source, params):
        """Fetch historical samples for one or more data sources."""
        samples = self.dispatcher.call_sync(
            'statd.output.get_stats', data_source, params)
        return {'data': list(samples)}

    def normalize(self, name, value):
        """Delegate value normalization to the module-level helper."""
        return normalize(name, value)
Example no. 2
0
class SystemInfoProvider(Provider):
    """Exposes general system information: uname, version, load and hardware."""

    def __init__(self):
        # Cached version string; resolved lazily by version().
        self.__version = None

    @accepts()
    @returns(h.array(str))
    def uname_full(self):
        """Return the full uname tuple as a plain list of strings.

        BUGFIX: os.uname() returns a uname_result object, not an array;
        convert it to a list so the payload matches the declared
        h.array(str) schema (mirrors how load_avg() wraps os.getloadavg()).
        """
        return list(os.uname())

    @accepts()
    @returns(str)
    @description("Return the full version string, e.g. FreeNAS-8.1-r7794-amd64.")
    def version(self):
        """Return the system version string, caching it after first lookup."""
        if self.__version is None:
            # See #9113
            conf = Configuration.Configuration()
            manifest = conf.SystemManifest()
            if manifest:
                self.__version = manifest.Version()
            else:
                # No system manifest available: fall back to the version file.
                with open(VERSION_FILE) as fd:
                    self.__version = fd.read().strip()

        return self.__version

    @accepts()
    @returns({'type': 'array', 'items': {'type': 'number'}, 'maxItems': 3, 'minItems': 3})
    def load_avg(self):
        """Return the 1/5/15-minute load averages as a 3-element list."""
        return list(os.getloadavg())

    @accepts()
    @returns(h.object(properties={
        'cpu_model': str,
        'cpu_cores': int,
        'cpu_clockrate': int,
        'memory_size': int,
        'vm_guest': h.one_of(str, None)
    }))
    def hardware(self):
        """Return basic CPU/memory information and the hypervisor type."""
        vm_guest = get_sysctl("kern.vm_guest")
        return {
            'cpu_model': get_sysctl("hw.model"),
            'cpu_cores': get_sysctl("hw.ncpu"),
            'cpu_clockrate': get_sysctl("hw.clockrate"),
            'memory_size': get_sysctl("hw.physmem"),
            # sysctl reports the string 'none' when not virtualized; map to None.
            'vm_guest': None if vm_guest == 'none' else vm_guest
        }

    @accepts()
    @returns(str)
    def host_uuid(self):
        """Return the host UUID.

        The [:-1] drops the trailing byte of the sysctl value — presumably a
        terminating NUL/newline appended by get_sysctl; confirm against that
        helper's implementation.
        """
        return get_sysctl("kern.hostuuid")[:-1]
Example no. 3
0
class VolumeDatastoreProvider(Provider):
    """VM datastore driver backed by ZFS volumes (pools/datasets/zvols)."""

    @private
    @generator
    def discover(self):
        """Yield a datastore descriptor for every existing volume."""
        for vol in self.dispatcher.call_sync('volume.query', [],
                                             {'select': 'id'}):
            yield {'id': vol, 'name': vol, 'type': 'volume'}

    @private
    @description('Lists files or ZVOLs')
    @accepts(h.ref('VmDatastorePathType'), str, str)
    @returns(h.array(h.ref('VmDatastoreItem')))
    def list(self, type, datastore_id, root_path):
        """List items under root_path; for BLOCK also include child ZVOLs."""
        result = []
        if type == 'BLOCK':
            if root_path:
                dataset = os.path.join(datastore_id, root_path)
            else:
                dataset = datastore_id

            # Regex matches only direct children of `dataset`: the lookahead
            # rejects any name containing a further '/'.
            zvols = self.dispatcher.call_sync(
                'volume.dataset.query', [('id', '~', f'^{dataset}/((?!/).)*$'),
                                         ('type', '=', 'VOLUME')],
                {'select': ('id', 'volsize')})
            for zvol, size in zvols:
                # Strip the leading pool component to produce a
                # datastore-relative absolute path.
                result.append({
                    'path': '/' + '/'.join(zvol.split('/')[1:]),
                    'type': 'BLOCK',
                    'size': size,
                    'description': ''
                })

        # Regular files and directories come from the local driver.
        result.extend(
            self.dispatcher.call_sync('vm.datastore.local.list', type,
                                      datastore_id, root_path))
        return result

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts dataset path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Resolve a datastore-relative path to a local filesystem path."""
        return self.dispatcher.call_sync('volume.resolve_path', datastore_id,
                                         datastore_path)

    @private
    @accepts(str)
    @returns(h.array(str))
    @description(
        'Returns list of resources which have to be locked to safely perform VM datastore operations'
    )
    def get_resources(self, datastore_id):
        """Lock the whole zpool backing this datastore."""
        return ['zpool:{0}'.format(datastore_id)]

    @private
    @accepts(str, str)
    @returns(bool)
    @description(
        'Checks for existence of dataset representing a VM datastore\'s directory'
    )
    def directory_exists(self, datastore_id, datastore_path):
        """Return True when a dataset exists for the given directory path."""
        path = os.path.join(datastore_id, datastore_path)
        return self.dispatcher.call_sync('volume.dataset.query',
                                         [('id', '=', path)],
                                         {'single': True}) is not None

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        """Yield 'path@snapshot' strings for VM snapshots of this dataset."""
        dataset = os.path.join(datastore_id, path)
        # Only snapshots tagged with the vm_snapshot metadata key count;
        # the '\\.' escapes the dots inside the property name for the
        # query's path syntax.
        snapshots = self.dispatcher.call_sync(
            'volume.snapshot.query',
            [('dataset', '=', dataset),
             ('metadata', 'contains', 'org.freenas:vm_snapshot')],
            {'select': 'metadata.org\\.freenas:vm_snapshot'})
        for snap_id in snapshots:
            yield '{0}@{1}'.format(path, snap_id)

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        """Return True when 'dataset@snapshot' exists as a VM snapshot."""
        raw_dataset, snap_id = path.split('@', 1)
        dataset = os.path.join(datastore_id, raw_dataset)
        return bool(
            self.dispatcher.call_sync(
                'volume.snapshot.query',
                [('dataset', '=', dataset),
                 ('metadata.org\\.freenas:vm_snapshot', '=', snap_id)],
                {'count': True}))

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        """Return '/dataset@snapshot' this dataset was cloned from, or None."""
        dataset = os.path.join(datastore_id, path)
        origin = self.dispatcher.call_sync(
            'volume.dataset.query', [('id', '=', dataset)], {
                'select': 'properties.origin.parsed',
                'single': True
            })
        if origin:
            dataset, snap_id = self.dispatcher.call_sync(
                'volume.snapshot.query', [('id', '=', origin)], {
                    'select':
                    ('dataset', 'metadata.org\\.freenas:vm_snapshot'),
                    'single': True
                })
            if snap_id:
                # Drop the pool component to return a datastore-relative path.
                dataset = '/'.join(dataset.split('/')[1:])
                return f'/{dataset}@{snap_id}'

        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        """Return datastore-relative paths of datasets cloned from path."""
        raw_dataset, snap_id = path.split('@', 1)
        dataset = os.path.join(datastore_id, raw_dataset)
        snapshot_id = self.dispatcher.call_sync(
            'volume.snapshot.query',
            [('dataset', '=', dataset),
             ('metadata.org\\.freenas:vm_snapshot', '=', snap_id)], {
                 'single': True,
                 'select': 'id'
             })
        datasets = self.dispatcher.call_sync(
            'volume.dataset.query',
            [('properties.origin.parsed', '=', snapshot_id)], {'select': 'id'})
        return ['/' + '/'.join(d.split('/')[1:]) for d in datasets]

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Classify a path as BLOCK, DIRECTORY, SNAPSHOT or FILE.

        Checked in order: ZFS dataset (VOLUME -> BLOCK, otherwise
        DIRECTORY), then snapshot, then plain file on disk; raises
        ENOENT when none of these match.
        """
        zfs_path = os.path.join(id, path)

        ds_type = self.dispatcher.call_sync('volume.dataset.query',
                                            [('id', '=', zfs_path)], {
                                                'single': True,
                                                'select': 'type'
                                            })
        if ds_type:
            if ds_type == 'VOLUME':
                return 'BLOCK'
            return 'DIRECTORY'

        if self.dispatcher.call_sync('volume.snapshot.query',
                                     [('id', '=', zfs_path)], {'count': True}):
            return 'SNAPSHOT'

        if os.path.exists(
                self.dispatcher.call_sync(
                    'vm.datastore.volume.get_filesystem_path', id, path)):
            return 'FILE'
        else:
            raise RpcException(errno.ENOENT,
                               'Path {0} does not exist'.format(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Return datastore-relative paths of this dataset and its children."""
        dataset = os.path.join(id, path)
        matching_datasets = self.dispatcher.call_sync(
            'volume.dataset.query', [('id', '~', f'^{dataset}(/.*)?$')],
            {'select': 'id'})
        return ['/' + '/'.join(d.split('/')[1:]) for d in matching_datasets]
Example no. 4
0
class FilesystemProvider(Provider):
    """RPC provider exposing filesystem listing, stat and file transfers."""

    @description("Lists contents of given directory")
    @accepts(str)
    @returns(h.array(h.ref('Directory')))
    def list_dir(self, path):
        """Return name/type/size/mtime entries for every item in `path`.

        Raises RpcException(ENOENT) when `path` is not a directory.
        Entries that vanish or cannot be stat'ed are silently skipped.
        """
        if not os.path.isdir(path):
            raise RpcException(errno.ENOENT,
                               'Path {0} is not a directory'.format(path))

        result = []
        for i in os.listdir(path):
            try:
                st = os.stat(os.path.join(path, i))
            except OSError:
                # Entry disappeared or is inaccessible; skip it.
                continue

            result.append({
                'name': i,
                'type': get_type(st),
                'size': st.st_size,
                'modified': st.st_mtime
            })

        return result

    def _resolve_name(self, rpc_method, numeric_id, key):
        """Resolve a uid/gid to 'name' or 'name@domain'; None when unknown.

        BUGFIX: the previous inline code computed `at = '@' if domain else
        None` and interpolated both into an f-string, so accounts without a
        domain produced names like 'rootNoneNone' (str(None) == 'None').
        """
        try:
            entry = self.dispatcher.call_sync(rpc_method, numeric_id)
            domain = q.get(entry, 'origin.domain')
            name = entry[key]
            return f'{name}@{domain}' if domain else name
        except RpcException:
            return None

    @accepts(str)
    @returns(h.ref('Stat'))
    def stat(self, path):
        """Return extended stat info for `path`: timestamps, resolved
        user/group names, ACL and a decomposed permission mask."""
        try:
            # Do not follow symlinks: report on the link itself.
            st = os.stat(path, follow_symlinks=False)
            a = acl.ACL(file=path)
        except OSError as err:
            raise RpcException(err.errno, str(err))

        username = self._resolve_name('dscached.account.getpwuid',
                                      st.st_uid, 'username')
        groupname = self._resolve_name('dscached.group.getgrgid',
                                       st.st_gid, 'name')

        return {
            'path': path,
            'type': get_type(st),
            'atime': datetime.utcfromtimestamp(st.st_atime),
            'mtime': datetime.utcfromtimestamp(st.st_mtime),
            'ctime': datetime.utcfromtimestamp(st.st_ctime),
            'uid': st.st_uid,
            'user': username,
            'gid': st.st_gid,
            'group': groupname,
            'permissions': {
                # ACL serialization may block; run it on the thread pool.
                'acl': self.dispatcher.threaded(a.__getstate__),
                'user': username,
                'group': groupname,
                'modes': {
                    'value': st.st_mode & 0o777,
                    'user': {
                        'read': bool(st.st_mode & stat.S_IRUSR),
                        'write': bool(st.st_mode & stat.S_IWUSR),
                        'execute': bool(st.st_mode & stat.S_IXUSR)
                    },
                    'group': {
                        'read': bool(st.st_mode & stat.S_IRGRP),
                        'write': bool(st.st_mode & stat.S_IWGRP),
                        'execute': bool(st.st_mode & stat.S_IXGRP)
                    },
                    'others': {
                        'read': bool(st.st_mode & stat.S_IROTH),
                        'write': bool(st.st_mode & stat.S_IWOTH),
                        'execute': bool(st.st_mode & stat.S_IXOTH)
                    },
                }
            }
        }

    @pass_sender
    @accepts(str)
    @returns(str)
    def download(self, path, sender):
        """Open `path` for reading and return a short-lived download token.

        The open file handle is owned by the issued FileToken; the transfer
        layer is responsible for closing it.
        """
        try:
            f = open(path, 'rb')
        except OSError as e:
            raise RpcException(e.errno, e)

        token = self.dispatcher.token_store.issue_token(
            FileToken(user=sender.user,
                      lifetime=60,
                      direction='download',
                      file=f,
                      name=os.path.basename(f.name),
                      size=os.path.getsize(path)))

        return token

    @pass_sender
    @accepts(str, h.one_of(int, None), str)
    @returns(str)
    def upload(self, dest_path, size, mode, sender):
        """Create `dest_path` for writing and return an upload token.

        NOTE(review): `mode` is accepted but unused here — presumably the
        permissions are applied by the transfer layer; confirm with callers.
        """
        try:
            f = open(dest_path, 'wb')
        except OSError as e:
            raise RpcException(e.errno, e)

        token = self.dispatcher.token_store.issue_token(
            FileToken(user=sender.user,
                      lifetime=60,
                      direction='upload',
                      file=f,
                      name=os.path.basename(dest_path),
                      size=size))

        return token

    @accepts(str)
    @returns(h.array(h.ref('OpenFile')))
    @generator
    def get_open_files(self, path):
        """Yield pid/process_name/path for every open file under `path`."""
        for proc in self.dispatcher.threaded(bsd.getprocs,
                                             bsd.ProcessLookupPredicate.PROC):
            for f in self.dispatcher.threaded(lambda: list(proc.files)):
                if not f.path:
                    continue

                if f.path.startswith(path):
                    yield {
                        'pid': proc.pid,
                        'process_name': proc.command,
                        'path': f.path
                    }
Example no. 5
0
class AlertsProvider(Provider):
    """RPC provider for querying, emitting and managing system alerts."""

    @query('Alert')
    @generator
    def query(self, filter=None, params=None):
        """Stream alerts from the datastore, filtered/paginated on demand."""
        return self.datastore.query_stream('alerts', *(filter or []),
                                           **(params or {}))

    @private
    @accepts(str, str)
    @returns(h.one_of(h.ref('Alert'), None))
    def get_active_alert(self, cls, target):
        """Return the active alert of class `cls` for `target`, or None."""
        return self.datastore.query('alerts', ('clazz', '=', cls),
                                    ('target', '=', target),
                                    ('active', '=', True),
                                    single=True)

    @description("Dismisses an alert")
    def dismiss(self, id: int) -> None:
        """Mark one alert dismissed and broadcast an alert.changed event."""
        alert = self.datastore.get_by_id('alerts', id)
        if not alert:
            raise RpcException(errno.ENOENT, 'Alert {0} not found'.format(id))

        if alert['dismissed']:
            raise RpcException(errno.ENOENT,
                               'Alert {0} is already dismissed'.format(id))

        if alert['one_shot']:
            # One-shot alerts are deactivated as soon as they are dismissed.
            alert['active'] = False

        alert.update({'dismissed': True, 'dismissed_at': datetime.utcnow()})

        self.datastore.update('alerts', id, alert)
        self.dispatcher.dispatch_event('alert.changed', {
            'operation': 'update',
            'ids': [id]
        })

    @description("Dismisses/Deletes all alerts from the database")
    def dismiss_all(self) -> None:
        """Dismiss every not-yet-dismissed alert, then emit one change event."""
        alert_list = self.query([('dismissed', '=', False)])
        alert_ids = []
        for alert in alert_list:
            alert.update({
                'dismissed': True,
                'dismissed_at': datetime.utcnow()
            })
            self.datastore.update('alerts', alert['id'], alert)
            alert_ids.append(alert['id'])

        if alert_ids:
            # BUGFIX: alert_ids is already a list of ids; the previous code
            # sent [alert_ids] (a nested list), inconsistent with the flat
            # 'ids' payloads produced by dismiss(), emit() and cancel().
            self.dispatcher.dispatch_event('alert.changed', {
                'operation': 'update',
                'ids': alert_ids
            })

    @private
    @description("Emits an event for the provided alert")
    @accepts(h.all_of(h.ref('Alert'), h.required('clazz')))
    @returns(int)
    def emit(self, alert):
        """Persist a new alert, notify listeners and hand it to alertd.

        Returns the new alert's id. If alertd is not running yet, the id is
        queued in pending_alerts for later delivery.
        """
        cls = self.datastore.get_by_id('alert.classes', alert['clazz'])
        if not cls:
            raise RpcException(
                errno.ENOENT,
                'Alert class {0} not found'.format(alert['clazz']))

        # Fill in defaults without clobbering caller-supplied fields.
        normalize(
            alert, {
                'when': datetime.utcnow(),
                'dismissed': False,
                'active': True,
                'one_shot': False,
                'severity': cls['severity']
            })

        alert.update({
            'type': cls['type'],
            'subtype': cls['subtype'],
            'send_count': 0
        })

        id = self.datastore.insert('alerts', alert)
        self.dispatcher.dispatch_event('alert.changed', {
            'operation': 'create',
            'ids': [id]
        })

        try:
            self.dispatcher.call_sync('alertd.alert.emit', id)
        except RpcException as err:
            if err.code == errno.ENOENT:
                # Alertd didn't start yet. Add alert to the pending queue
                pending_alerts.append(id)
            else:
                raise

        return id

    @private
    @description("Cancels already scheduled alert")
    def cancel(self, id: int) -> int:
        """Deactivate an active alert and notify alertd; returns the id."""
        alert = self.datastore.get_by_id('alerts', id)
        if not alert:
            raise RpcException(errno.ENOENT, 'Alert {0} not found'.format(id))

        if not alert['active']:
            raise RpcException(errno.ENOENT,
                               'Alert {0} is already cancelled'.format(id))

        alert.update({'active': False, 'cancelled_at': datetime.utcnow()})

        self.datastore.update('alerts', id, alert)
        self.dispatcher.dispatch_event('alert.changed', {
            'operation': 'update',
            'ids': [id]
        })

        try:
            self.dispatcher.call_sync('alertd.alert.cancel', id)
        except RpcException as err:
            if err.code == errno.ENOENT:
                # Alertd didn't start yet. Add alert to the pending queue
                pending_cancels.append(id)
            else:
                raise

        return id

    @description("Returns list of registered alerts")
    def get_alert_classes(self) -> List[AlertClass]:
        """Return all registered alert class definitions."""
        return self.datastore.query('alert.classes')

    @description("Returns list of registered alert severities")
    def get_alert_severities(self) -> Set[AlertSeverity]:
        """Return the distinct severities across all alert classes."""
        alert_classes = self.get_alert_classes()
        return {alert_class['severity'] for alert_class in alert_classes}
Example no. 6
0
class DatastoreProvider(Provider):
    """Generic VM-datastore API; dispatches to driver-specific providers
    (vm.datastore.<driver>.*) based on each datastore's type."""

    @query('VmDatastore')
    @generator
    def query(self, filter=None, params=None):
        """Stream datastores: driver-discovered ones plus database entries."""
        drivers = self.supported_drivers()

        def extend(obj):
            # Attach the driver capability flags (clones/snapshots support).
            obj['capabilities'] = drivers[obj['type']]
            return obj

        def doit():
            for i in drivers:
                # A misbehaving driver must not break enumeration of the rest.
                with contextlib.suppress(BaseException):
                    for d in self.dispatcher.call_sync('vm.datastore.{0}.discover'.format(i)):
                        yield extend(d)

            yield from self.datastore.query_stream('vm.datastores', callback=extend)

        return q.query(doit(), *(filter or []), **(params or {}))

    @description("Returns list of supported datastore drivers")
    def supported_drivers(self):
        """Map driver name -> capability flags for all datastore plugins."""
        result = {}
        for p in list(self.dispatcher.plugins.values()):
            if p.metadata and p.metadata.get('type') == 'datastore':
                result[p.metadata['driver']] = {
                    'clones': p.metadata['clones'],
                    'snapshots': p.metadata['snapshots']
                }

        return result

    @private
    @accepts(str)
    @returns(str)
    @description('Returns type of a datastore driver')
    def get_driver(self, id):
        """Return the driver type of datastore `id`; ENOENT when unknown."""
        ds = self.query([('id', '=', id)], {'single': True})
        if not ds:
            raise RpcException(errno.ENOENT, 'Datastore {0} not found'.format(id))

        return ds['type']

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence under a selected VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        """Delegate directory existence check to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.directory_exists'.format(driver),
            datastore_id,
            normpath(datastore_path)
        )

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Delegate path resolution to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_filesystem_path'.format(driver),
            datastore_id,
            normpath(datastore_path)
        )

    @private
    @accepts(str)
    @returns(h.array(str))
    @description('Returns list of resources which have to be locked to safely perform VM datastore operations')
    def get_resources(self, datastore_id):
        """Delegate resource-lock listing to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_resources'.format(driver),
            datastore_id
        )

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        """Delegate snapshot listing to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_snapshots'.format(driver),
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        """Delegate snapshot existence check to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.snapshot_exists'.format(driver),
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        """Delegate clone-origin lookup to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_clone_source'.format(driver),
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        """Delegate clone enumeration to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            'vm.datastore.{0}.get_snapshot_clones'.format(driver),
            datastore_id,
            normpath(path)
        )

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Delegate path-type classification to the datastore's driver."""
        driver = self.get_driver(id)
        return self.dispatcher.call_sync('vm.datastore.{0}.get_path_type'.format(driver), id, normpath(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Delegate recursive directory listing to the datastore's driver."""
        driver = self.get_driver(id)
        return self.dispatcher.call_sync('vm.datastore.{0}.list_dirs'.format(driver), id, normpath(path))
Example no. 7
0
class NFSDatastoreProvider(Provider):
    """VM datastore driver for NFS shares mounted under /nfs.

    NFS datastores support neither snapshots nor clones, so the related
    operations return empty/negative results.
    """

    @private
    @generator
    def discover(self):
        """NFS datastores are configured explicitly; nothing to discover."""
        return

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts remote NFS VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Map a datastore-relative path onto its /nfs mount point."""
        entry = self.datastore.get_by_id('vm.datastores', datastore_id)
        if entry['type'] != 'nfs':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')
        return os.path.join('/nfs', entry['name'], datastore_path)

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence in NFS VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        """Check whether the given path exists under the NFS mount."""
        entry = self.datastore.get_by_id('vm.datastores', datastore_id)
        if entry['type'] != 'nfs':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')
        target = os.path.join('/nfs', entry['name'], datastore_path)
        return os.path.exists(target)

    @private
    @accepts(str)
    @returns(h.array(str))
    @description('Returns list of resources which have to be locked to safely perform VM datastore operations')
    def get_resources(self, datastore_id):
        """NFS operations only need the global 'system' resource lock."""
        return ['system']

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        """Snapshots are unsupported on NFS; yields nothing."""
        return

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        """Snapshots are unsupported on NFS; always False."""
        return False

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        """Clones are unsupported on NFS; always None."""
        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        """Clones are unsupported on NFS; always an empty list."""
        return []

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Delegate path-type detection to the local filesystem driver."""
        return self.dispatcher.call_sync('vm.datastore.local.get_path_type', id, path)

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Delegate directory listing to the local filesystem driver."""
        return self.dispatcher.call_sync('vm.datastore.local.list_dirs', id, path)
Example no. 8
0
class LocalDatastoreProvider(Provider):
    """VM datastore driver backed by a plain local directory.

    Local datastores support neither snapshots nor clones, so the related
    operations return empty/negative results.
    """

    @private
    @generator
    def discover(self):
        """Local datastores are configured explicitly; nothing to discover."""
        return

    @private
    @description('Lists files or block devices')
    @accepts(h.ref('VmDatastorePathType'), str, str)
    @returns(h.array(h.ref('VmDatastoreItem')))
    def list(self, type, datastore_id, root_path):
        """List entries under root_path as datastore items.

        Directories are always reported; plain files only when the requested
        type is FILE.
        """
        base = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', datastore_id, root_path)
        if not os.path.isdir(base):
            raise RpcException(errno.EINVAL, f'Selected path {root_path} is not a directory')

        items = []
        for name in os.listdir(base):
            full_path = os.path.join(base, name)
            directory = os.path.isdir(full_path)
            if not directory and type != 'FILE':
                continue

            items.append({
                'path': os.path.join('/', root_path, name),
                'type': 'DIRECTORY' if directory else 'FILE',
                'size': os.stat(full_path).st_size,
                'description': ''
            })

        return items

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Join the datastore's configured root with a relative path."""
        entry = self.datastore.get_by_id('vm.datastores', datastore_id)
        if entry['type'] != 'local':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')
        return os.path.join(q.get(entry, 'properties.path'), datastore_path)

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence in local VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        """Check whether the given path exists under the datastore root."""
        entry = self.datastore.get_by_id('vm.datastores', datastore_id)
        if entry['type'] != 'local':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')
        target = os.path.join(q.get(entry, 'properties.path'), datastore_path)
        return os.path.exists(target)

    @private
    @accepts(str)
    @returns(h.array(str))
    @description('Returns list of resources which have to be locked to safely perform VM datastore operations')
    def get_resources(self, datastore_id):
        """Local operations only need the global 'system' resource lock."""
        return ['system']

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        """Snapshots are unsupported on local datastores; yields nothing."""
        return

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        """Snapshots are unsupported on local datastores; always False."""
        return False

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        """Clones are unsupported on local datastores; always None."""
        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        """Clones are unsupported on local datastores; always empty."""
        return []

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Classify the path as DIRECTORY or FILE; ENOENT when absent."""
        fs_path = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', id, path)
        if not os.path.exists(fs_path):
            raise RpcException(errno.ENOENT, 'Path {0} does not exist'.format(fs_path))

        return 'DIRECTORY' if os.path.isdir(fs_path) else 'FILE'

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Return every directory under the resolved path, recursively."""
        fs_path = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', id, path)
        return [dirpath for dirpath, _, _ in os.walk(fs_path)]
Example no. 9
0
import errno
import logging
import os
import shutil
import ipfsapi
from requests.exceptions import ConnectionError
from datastore.config import ConfigNode
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns, private
from task import Task, Provider, TaskException, ValidationException, TaskDescription

# Module-level logger for the IPFS plugin.
logger = logging.getLogger('IPFSPlugin')

# Declarative map of ipfsapi client methods wrapped as dispatcher tasks —
# presumably consumed by task-registration code elsewhere in the plugin
# (not visible here). For each ipfsapi method name:
#   early_describe - human-readable description shown before execution
#   accepts        - RPC schema of the arguments the task accepts
#   args           - names under which positional args are forwarded
ipfs_tasks = {
    'add': {
        'early_describe': 'Calling IPFS add',
        'accepts': (h.one_of(str, h.array(str)), bool),
        'args': ('files', 'recursive')
    },
    'get': {
        'early_describe': 'Calling IPFS get',
        'accepts': (str, h.one_of(str, None)),
        'args': ('multihash', 'filepath')
    },
    'add_json': {
        'early_describe': 'Calling IPFS add json',
        'accepts': (h.object(),),
        'args': ('json_obj',)
    }

}
Example no. 10
0
import errno
import logging
import os
import shutil
import ipfsapi
from requests.exceptions import ConnectionError
from datastore.config import ConfigNode
from freenas.dispatcher.rpc import RpcException, SchemaHelper as h, description, accepts, returns, private
from task import Task, Provider, TaskException, ValidationException, TaskDescription

# Module-level logger for the IPFS plugin.
logger = logging.getLogger('IPFSPlugin')

# Declarative map of ipfsapi client methods wrapped as dispatcher tasks —
# presumably consumed by task-registration code elsewhere in the plugin
# (not visible here). For each ipfsapi method name:
#   early_describe - human-readable description shown before execution
#   accepts        - RPC schema of the arguments the task accepts
#   args           - names under which positional args are forwarded
ipfs_tasks = {
    'add': {
        'early_describe': 'Calling IPFS add',
        'accepts': (h.one_of(str, h.array(str)), bool),
        'args': ('files', 'recursive')
    },
    'get': {
        'early_describe': 'Calling IPFS get',
        'accepts': (str, h.one_of(str, None)),
        'args': ('multihash', 'filepath')
    },
    'add_json': {
        'early_describe': 'Calling IPFS add json',
        'accepts': (h.object(),),
        'args': ('json_obj',)
    }

}
Example no. 11
0
class TestProvider(Provider):
    """RPC endpoints exercising dispatcher features: streaming, errors,
    lazy query fields, attribute queries and nested file descriptors."""

    @generator
    def stream(self, count=10):
        """Yield `count` dummy objects; tests generator-based responses."""
        for i in range(count):
            yield {
                "id": 1,
                "value": "{0} bottles of beer on the wall".format(i)
            }

    @generator
    def wrapped_stream(self, count=10):
        """Proxy `test.stream` through call_sync; tests nested streaming."""
        return self.dispatcher.call_sync('test.stream', count)

    def sleep(self, n):
        """Block for `n` seconds; tests long-running synchronous calls."""
        time.sleep(n)
        return 'done'

    def rpcerror(self):
        """Unconditionally raise; tests RpcException propagation to clients."""
        # Typo fixed in the extra-info string ('paramaeter' -> 'parameter').
        raise RpcException(errno.EINVAL, 'Testing if parameter',
                           'This is in the extra parameter')

    def lazy_query(self, filter=None, params=None):
        """Query over 10 synthetic rows whose slow fields are lazily computed.

        `slow_value` and `composite_slow_value` each sleep for one second
        when materialized, so excluding them (see the exclude_* endpoints)
        should make the query return quickly.
        """
        def extend(obj):
            def slow_scalar():
                time.sleep(1)
                return 'I am so slow: {0}'.format(obj['id'])

            def slow_mapping():
                time.sleep(1)
                return {'foo': obj['id'] + 1, 'bar': obj['id'] + 2}

            obj['fast_value'] = obj['id'] * 5
            obj['slow_value'] = lazy(slow_scalar)
            obj['composite_slow_value'] = lazy(slow_mapping)
            return obj

        gen = ({'id': i} for i in range(10))
        return q.query(gen, *(filter or []), callback=extend, **(params or {}))

    def exclude_string(self):
        """lazy_query with a single excluded field given as a string."""
        return self.lazy_query([], {'exclude': 'slow_value'})

    def exclude_tuple(self):
        """lazy_query excluding a field and a nested sub-field."""
        return self.lazy_query(
            [], {'exclude': ('slow_value', 'composite_slow_value.foo')})

    def exclude_lazy(self):
        """lazy_query excluding both lazy fields entirely."""
        return self.lazy_query(
            [], {'exclude': ('slow_value', 'composite_slow_value')})

    def attr_query(self):
        """Exercise q.set/get/contains/delete on objects and dicts alike."""
        class Test(object):
            pass

        c = Test()
        d = {}
        q.set(c, 'f', True)
        q.set(d, 'f2', Test())
        q.set(d, 'f2.nested', True)

        if q.get(c, 'f') and q.get(d, 'f2.nested') and isinstance(
                q.get(d, 'f2'), Test):
            items = [d, c]  # renamed from 'l': ambiguous single-letter name (E741)
            if q.contains(c, 'f'):
                q.delete(c, 'f')

                return bool(q.query(items, ('f2.nested', '=', True), count=True))

        return False

    @accepts(str, h.one_of(h.object(), None))
    def serviced_message(self, msg, extra=None):
        """Push a status message to serviced, ignoring delivery failures."""
        if extra is None:
            extra = {}
        try:
            push_status(msg, extra=extra)
        except Exception:
            # Best-effort delivery; narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            pass

    def nested_fd(self, data):
        """Write a greeting through the file descriptor nested in `data`."""
        fd = data['file_descriptor']
        assert isinstance(fd, FileDescriptor)
        with os.fdopen(fd.fd, 'wb') as f:
            f.write(b'hello\n')
Esempio n. 12
0
        os.setuid(user_uid)

    return set_ids


@description("Runs an Rsync Copy Task with the specified arguments")
@accepts(h.all_of(
    h.ref('rsync_copy'),
    h.required(
        'user',
        'path',
        'remote_host',
        'rsync_direction',
        'rsync_mode'
    ),
    h.one_of('remote_path', 'remote_module')
))
class RsyncCopyTask(ProgressTask):
    def describe(self, params):
        return 'Running Rsync Copy Task with user specified arguments'

    def verify(self, params):
        errors = []

        if self.datastore.get_one('users', ('username', '=', params.get('user'))) is None:
            raise VerifyException(
                errno.ENOENT, 'User {0} does not exist'.format(params.get('user'))
            )

        path = params.get('path')
        rmode = params.get('rsync_mode')
Esempio n. 13
0
class MailProvider(Provider):
    """RPC provider for reading mail configuration and sending email alerts."""

    def get_config(self) -> AlertEmitterEmail:
        """Return the stored mail configuration with the password wrapped."""
        node = ConfigNode('mail', self.configstore).__getstate__()
        node['password'] = Password(node['password'])
        return AlertEmitterEmail(node)

    @accepts(h.ref('MailMessage'), h.one_of(h.ref('AlertEmitterEmail'), None))
    def send(self, mailmessage, mail=None):
        """Send `mailmessage` over SMTP.

        Uses the stored 'mail' configuration unless an explicit `mail`
        config is supplied; falls back to root's email address when no
        recipient is given.

        Raises:
            RpcException(EINVAL): server or port missing from configuration.
            RpcException(EACCES): SMTP authentication failed.
            RpcException(EFAULT): any other delivery error.
        """
        if mail is None:
            mail = ConfigNode('mail', self.configstore).__getstate__()
        elif mail.get('password'):
            mail['password'] = unpassword(mail['password'])

        if not mail.get('server') or not mail.get('port'):
            raise RpcException(
                errno.EINVAL,
                'You must provide an outgoing server and port when sending mail',
            )

        to = mailmessage.get('to')
        attachments = mailmessage.get('attachments')
        subject = mailmessage.get('subject')
        extra_headers = mailmessage.get('extra_headers')

        if not to:
            # Default to the root user's configured email address.
            to = self.dispatcher.call_sync('user.query', [('username', '=', 'root')], {'single': True})
            if to and to.get('email'):
                to = [to['email']]

        if attachments:
            msg = MIMEMultipart()
            msg.preamble = mailmessage['message']
            for attachment in attachments:
                msg.attach(attachment)
        else:
            msg = MIMEText(mailmessage['message'], _charset='utf-8')
        if subject:
            msg['Subject'] = subject

        msg['From'] = mailmessage.get('from_address', mail['from_address'])
        msg['To'] = ', '.join(to)
        msg['Date'] = formatdate()

        local_hostname = socket.gethostname()
        version = self.dispatcher.call_sync('system.info.version').split('-')[0].lower()

        # Random suffix keeps Message-IDs unique. The .decode() is required:
        # urlsafe_b64encode returns bytes, which would otherwise be rendered
        # as "b'...'" inside the header.
        msg['Message-ID'] = "<{0}-{1}.{2}@{3}>".format(
            version,
            datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
            base64.urlsafe_b64encode(os.urandom(3)).decode('ascii'),
            local_hostname)

        if not extra_headers:
            extra_headers = {}
        for key, val in extra_headers.items():
            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val
        msg = msg.as_string()

        try:
            if mail['encryption'] == 'SSL':
                klass = smtplib.SMTP_SSL
            else:
                klass = smtplib.SMTP
            server = klass(mail['server'], mail['port'], timeout=300, local_hostname=local_hostname)
            if mail['encryption'] == 'TLS':
                server.starttls()

            if mail['auth']:
                server.login(mail['user'], mail['password'])
            server.sendmail(mail['from_address'], to, msg)
            server.quit()
        except smtplib.SMTPAuthenticationError as e:
            raise RpcException(errno.EACCES, 'Authentication error: {0} {1}'.format(
                e.smtp_code, e.smtp_error))
        except Exception as e:
            # NOTE(review): a trailing bare `except:` used to follow this
            # clause; it was unreachable for Exception subclasses and has
            # been removed as dead code.
            logger.error('Failed to send email: {0}'.format(str(e)), exc_info=True)
            raise RpcException(errno.EFAULT, 'Email send error: {0}'.format(str(e)))
Esempio n. 14
0
class LocalDatastoreProvider(Provider):
    """Datastore driver backed by a plain directory on the local filesystem.

    Snapshot and clone operations are unsupported and return empty results.
    """

    @private
    @generator
    def discover(self):
        # Local datastores are created explicitly; there is nothing to
        # auto-discover, so the generator yields nothing.
        return

    def _local_root(self, datastore_id):
        # Look up the datastore record and reject any non-local type,
        # returning the configured root directory.
        record = self.datastore.get_by_id('vm.datastores', datastore_id)
        if record['type'] != 'local':
            raise RpcException(errno.EINVAL, 'Invalid datastore type')

        return q.get(record, 'properties.path')

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        return os.path.join(self._local_root(datastore_id), datastore_path)

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence in local VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        full_path = os.path.join(self._local_root(datastore_id), datastore_path)
        return os.path.exists(full_path)

    @private
    @accepts(str)
    @returns(h.array(str))
    @description(
        'Returns list of resources which have to be locked to safely perform VM datastore operations'
    )
    def get_resources(self, datastore_id):
        # Only the global system resource needs locking for local storage.
        return ['system']

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        # Plain directories have no snapshot support; yield nothing.
        return

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        # No snapshots on local datastores, so none can exist.
        return False

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        # Clones are unsupported, so there is never a clone source.
        return None

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        # Clones are unsupported; always an empty list.
        return []

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        resolved = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', id, path)
        if not os.path.exists(resolved):
            raise RpcException(errno.ENOENT, 'Path {0} does not exist'.format(resolved))

        return 'DIRECTORY' if os.path.isdir(resolved) else 'BLOCK_DEVICE'

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        resolved = self.dispatcher.call_sync('vm.datastore.get_filesystem_path', id, path)
        return [entry[0] for entry in os.walk(resolved)]
Esempio n. 15
0
            return path

        if type == 'FILE':
            return os.path.dirname(path)

        raise RpcException(errno.EINVAL, 'Invalid share target type {0}'.format(type))


@description("Creates new share")
@accepts(
    h.all_of(
        h.ref('share'),
        h.required('name', 'type', 'target_type', 'target_path', 'properties')
    ),
    h.one_of(
        h.ref('volume-dataset-properties'),
        None
    ),
    bool
)
class CreateShareTask(Task):
    @classmethod
    def early_describe(cls):
        return "Creating share"

    def describe(self, share, dataset_properties=None, enable_service=False):
        return TaskDescription("Creating share {name}", name=share.get('name') if share else '')

    def verify(self, share, dataset_properties=None, enable_service=False):
        if not self.dispatcher.call_sync('share.supported_types').get(share['type']):
            raise VerifyException(errno.ENXIO, 'Unknown sharing type {0}'.format(share['type']))
Esempio n. 16
0
            return path

        if type == 'FILE':
            return os.path.dirname(path)

        raise RpcException(errno.EINVAL,
                           'Invalid share target type {0}'.format(type))


@description("Creates new share")
@accepts(
    h.all_of(
        h.ref('share'),
        h.required('name', 'type', 'target_type', 'target_path',
                   'properties')),
    h.one_of(h.ref('volume-dataset-properties'), None), bool)
class CreateShareTask(Task):
    @classmethod
    def early_describe(cls):
        return "Creating share"

    def describe(self, share, dataset_properties=None, enable_service=False):
        return TaskDescription("Creating share {name}",
                               name=share.get('name') if share else '')

    def verify(self, share, dataset_properties=None, enable_service=False):
        if not self.dispatcher.call_sync('share.supported_types').get(
                share['type']):
            raise VerifyException(
                errno.ENXIO, 'Unknown sharing type {0}'.format(share['type']))
Esempio n. 17
0
class DatastoreProvider(Provider):
    """Facade that routes datastore RPCs to per-driver
    ``vm.datastore.<driver>.*`` namespaces.

    Format strings are unified on f-strings, which this class already used
    in places, replacing the remaining ``str.format`` calls.
    """

    @query('VmDatastore')
    @generator
    def query(self, filter=None, params=None):
        """Stream discovered and persisted datastores, each extended with
        driver capabilities and a lazily evaluated state."""
        drivers = self.supported_drivers()

        def extend(obj):
            obj['capabilities'] = drivers[obj['type']]
            # State may be expensive to compute, so resolve it lazily.
            obj['state'] = lazy(self.dispatcher.call_sync,
                                f'vm.datastore.{obj["type"]}.get_state',
                                obj['id'])
            return obj

        def doit():
            # Discovery is best-effort: a broken driver must not prevent
            # listing the remaining datastores.
            for i in drivers:
                with contextlib.suppress(Exception):
                    for d in self.dispatcher.call_sync(f'vm.datastore.{i}.discover'):
                        yield extend(d)

            yield from self.datastore.query_stream('vm.datastores',
                                                   callback=extend)

        return q.query(doit(), *(filter or []), **(params or {}))

    @description("Returns list of supported datastore drivers")
    def supported_drivers(self):
        """Map driver name -> capability flags for loaded datastore plugins."""
        result = {}
        # Snapshot the plugin list to avoid mutation during iteration.
        for p in list(self.dispatcher.plugins.values()):
            if p.metadata and p.metadata.get('type') == 'datastore':
                result[p.metadata['driver']] = {
                    'clones': p.metadata['clones'],
                    'snapshots': p.metadata['snapshots']
                }

        return result

    @private
    @accepts(str)
    @returns(str)
    def get_state(self, datastore_id):
        """Return the state reported by the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(f'vm.datastore.{driver}.get_state',
                                         datastore_id)

    @description('Lists disks or files or block devices')
    @accepts(h.ref('VmDatastorePathType'), h.one_of(str, None), str)
    @returns(h.array(h.ref('VmDatastoreItem')))
    def list(self, type, datastore_id=None, root_path='/'):
        """List items of `type`.

        DISK listings are system-wide; BLOCK and FILE listings require a
        `datastore_id` and are delegated to the matching driver.
        """
        if type == 'DISK':
            available_disks_paths = self.dispatcher.call_sync(
                'volume.get_available_disks')
            available_disks = self.dispatcher.call_sync(
                'disk.query', [('path', 'in', available_disks_paths)],
                {'select': ('id', 'path', 'status.description', 'mediasize')})
            return [{
                'path': i,
                'size': s,
                'description': f'{p} {d}',
                'type': type
            } for i, p, d, s in available_disks]

        if not datastore_id:
            raise RpcException(
                errno.EINVAL,
                'Datastore ID has to be specified for BLOCK and FILE path types'
            )

        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.list', type, datastore_id,
            normpath(root_path))

    @private
    @accepts(str)
    @returns(str)
    @description('Returns type of a datastore driver')
    def get_driver(self, id):
        """Resolve datastore `id` to its driver (type) name.

        Raises:
            RpcException(ENOENT): no datastore with that id exists.
        """
        type = self.query([('id', '=', id)], {
            'single': True,
            'select': 'type'
        })
        if not type:
            raise RpcException(errno.ENOENT, f'Datastore {id} not found')

        return type

    @private
    @accepts(str, str)
    @returns(bool)
    @description('Checks for directory existence under a selected VM datastore')
    def directory_exists(self, datastore_id, datastore_path):
        """Delegate directory-existence check to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.directory_exists', datastore_id,
            normpath(datastore_path))

    @private
    @accepts(str, str)
    @returns(str)
    @description('Converts VM datastore path to local filesystem path')
    def get_filesystem_path(self, datastore_id, datastore_path):
        """Delegate path translation to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.get_filesystem_path',
            datastore_id, normpath(datastore_path))

    @private
    @accepts(str)
    @returns(h.array(str))
    @description(
        'Returns list of resources which have to be locked to safely perform VM datastore operations'
    )
    def get_resources(self, datastore_id):
        """Delegate resource-lock listing to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.get_resources', datastore_id)

    @private
    @generator
    @accepts(str, str)
    @description('Returns a list of snapshots on a given VM datastore path')
    def get_snapshots(self, datastore_id, path):
        """Delegate snapshot listing to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.get_snapshots', datastore_id,
            normpath(path))

    @private
    @accepts(str, str)
    @returns(bool)
    def snapshot_exists(self, datastore_id, path):
        """Delegate snapshot-existence check to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.snapshot_exists', datastore_id,
            normpath(path))

    @private
    @accepts(str, str)
    @returns(h.one_of(str, None))
    def get_clone_source(self, datastore_id, path):
        """Delegate clone-source lookup to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.get_clone_source', datastore_id,
            normpath(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def get_snapshot_clones(self, datastore_id, path):
        """Delegate clone listing to the datastore's driver."""
        driver = self.get_driver(datastore_id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.get_snapshot_clones',
            datastore_id, normpath(path))

    @private
    @accepts(str, str)
    @returns(h.ref('VmDatastorePathType'))
    def get_path_type(self, id, path):
        """Delegate path-type resolution to the datastore's driver."""
        driver = self.get_driver(id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.get_path_type', id, normpath(path))

    @private
    @accepts(str, str)
    @returns(h.array(str))
    def list_dirs(self, id, path):
        """Delegate recursive directory listing to the datastore's driver."""
        driver = self.get_driver(id)
        return self.dispatcher.call_sync(
            f'vm.datastore.{driver}.list_dirs', id, normpath(path))
Esempio n. 18
0
            handler.error = True
            handler.emit_update_details()
            raise TaskException(
                errno.EAGAIN,
                'Downloading Updates Failed for some reason, check logs')
        check_updates(self.dispatcher,
                      self.configstore,
                      cache_dir=cache_dir,
                      check_now=False)
        handler.finished = True
        handler.emit_update_details()
        self.set_progress(100, 'Updates finished downloading')


@description("Apply a manual update using specified tarfile")
@accepts(str, h.one_of(bool, None))
class UpdateManualTask(ProgressTask):
    @classmethod
    def early_describe(cls):
        return 'Updating via the provided update tarfile'

    def describe(self, path, reboot_post_install=False):
        return TaskDescription(
            "Updating from tarfile ({name})".format(name=path))

    def verify(self, path, reboot_post_install=False):

        if not os.path.exists(path):
            raise VerifyException(errno.EEXIST, 'File does not exist')

        if not tarfile.is_tarfile(path):
Esempio n. 19
0
            return path

        if type == 'FILE':
            return os.path.dirname(path)

        raise RpcException(errno.EINVAL, 'Invalid share target type {0}'.format(type))


@description("Creates new share")
@accepts(
    h.all_of(
        h.ref('Share'),
        h.required('name', 'type', 'target_type', 'target_path', 'properties')
    ),
    h.one_of(
        h.ref('VolumeDatasetProperties'),
        None
    ),
    bool
)
class CreateShareTask(Task):
    @classmethod
    def early_describe(cls):
        return "Creating share"

    def describe(self, share, dataset_properties=None, enable_service=False):
        return TaskDescription("Creating share {name}", name=share.get('name') if share else '')

    def verify(self, share, dataset_properties=None, enable_service=False):
        if not self.dispatcher.call_sync('share.supported_types').get(share['type']):
            raise VerifyException(errno.ENXIO, 'Unknown sharing type {0}'.format(share['type']))
Esempio n. 20
0
                errno.EAGAIN, 'Got exception {0} while trying to Download Updates'.format(str(e))
            )
        if not download_successful:
            handler.error = True
            handler.emit_update_details()
            raise TaskException(
                errno.EAGAIN, 'Downloading Updates Failed for some reason, check logs'
            )
        check_updates(self.dispatcher, self.configstore, cache_dir=cache_dir, check_now=False)
        handler.finished = True
        handler.emit_update_details()
        self.set_progress(100, 'Updates finished downloading')


@description("Apply a manual update using specified tarfile")
@accepts(str, h.one_of(bool, None))
class UpdateManualTask(ProgressTask):
    @classmethod
    def early_describe(cls):
        return 'Updating via the provided update tarfile'

    def describe(self, path, reboot_post_install=False):
        return TaskDescription("Updating from tarfile ({name})".format(name=path))

    def verify(self, path, reboot_post_install=False):

        if not os.path.exists(path):
            raise VerifyException(errno.EEXIST, 'File does not exist')

        if not tarfile.is_tarfile(path):
            raise VerifyException(errno.EEXIST, 'File does not exist')