예제 #1
0
class CertificateProvider(Provider):
    @query('CryptoCertificate')
    @generator
    def query(self, filter=None, params=None):
        """Stream certificates from the datastore, annotating each entry
        with its on-disk paths and wrapping the private key in Password.
        """
        def extend(certificate):
            # CA_* certificates live in a dedicated subdirectory.
            if certificate['type'].startswith('CA_'):
                base = '/etc/certificates/CA'
            else:
                base = '/etc/certificates'

            name = certificate['name']

            if certificate.get('certificate'):
                certificate['certificate_path'] = os.path.join(base, name + '.crt')

            if certificate.get('privatekey'):
                # Wrap so the key is not exposed as plain text downstream.
                certificate['privatekey'] = Password(certificate['privatekey'])
                certificate['privatekey_path'] = os.path.join(base, name + '.key')

            if certificate.get('csr'):
                certificate['csr_path'] = os.path.join(base, name + '.csr')

            return certificate

        entries = self.datastore.query_stream('crypto.certificates', callback=extend)
        return q.query(entries, *(filter or []), stream=True, **(params or {}))

    @accepts()
    @returns(h.object())
    def get_country_codes(self):
        """Return the mapping of known country codes."""
        return COUNTRY_CODES
예제 #2
0
class SystemDatasetProvider(Provider):
    @private
    @description("Initializes the .system dataset")
    @accepts()
    def init(self):
        """Create, mount and link the .system dataset for the configured pool."""
        cs = self.configstore
        pool = cs.get('system.dataset.pool')
        dsid = cs.get('system.dataset.id')
        create_system_dataset(self.dispatcher, dsid, pool)
        mount_system_dataset(self.dispatcher, dsid, pool, SYSTEM_DIR)
        link_directories(self.dispatcher)

    @private
    @description(
        "Creates directory in .system dataset and returns reference to it")
    @accepts(str)
    @returns(str)
    def request_directory(self, name):
        """Return the path of ``name`` inside the .system dataset,
        creating the directory when it does not exist yet.

        Raises RpcException(EPERM) when the path exists but is not a
        directory.
        """
        path = os.path.join(SYSTEM_DIR, name)
        if os.path.isdir(path):
            return path

        if os.path.exists(path):
            # Exists but is not a directory — refuse to hand it out.
            raise RpcException(errno.EPERM,
                               'Cannot grant directory {0}'.format(name))

        os.mkdir(path)
        return path

    @description("Returns current .system dataset parameters")
    @returns(h.object())
    def status(self):
        """Return the id and pool of the current .system dataset."""
        cs = self.configstore
        return {
            'id': cs.get('system.dataset.id'),
            'pool': cs.get('system.dataset.pool')
        }
예제 #3
0
class DynDNSProvider(Provider):
    @private
    @accepts()
    @returns(h.ref('ServiceDyndns'))
    def get_config(self):
        """Return the dyndns service configuration as a plain dict."""
        node = ConfigNode('service.dyndns', self.configstore)
        return node.__getstate__()

    @accepts()
    @returns(h.object())
    def providers(self):
        """Return the table of supported dynamic DNS providers."""
        return PROVIDERS
예제 #4
0
class SystemInfoProvider(Provider):
    def __init__(self):
        # Cached result of version(); populated lazily on first call.
        self.__version = None

    @accepts()
    @returns(h.array(str))
    def uname_full(self):
        """Return the full uname tuple of the running system."""
        return os.uname()

    @accepts()
    @returns(str)
    @description("Return the full version string, e.g. FreeNAS-8.1-r7794-amd64.")
    def version(self):
        """Return the system version, caching it after the first lookup."""
        if self.__version is not None:
            return self.__version

        # See #9113 — prefer the manifest; fall back to the version file.
        manifest = Configuration.Configuration().SystemManifest()
        if manifest:
            self.__version = manifest.Version()
        else:
            with open(VERSION_FILE) as fd:
                self.__version = fd.read().strip()

        return self.__version

    @accepts()
    @returns({'type': 'array', 'items': {'type': 'number'}, 'maxItems': 3, 'minItems': 3})
    def load_avg(self):
        """Return the 1/5/15-minute load averages as a 3-element list."""
        return [*os.getloadavg()]

    @accepts()
    @returns(h.object(properties={
        'cpu_model': str,
        'cpu_cores': int,
        'cpu_clockrate': int,
        'memory_size': int,
        'vm_guest': h.one_of(str, None)
    }))
    def hardware(self):
        """Return basic hardware facts gathered from sysctl."""
        guest = get_sysctl("kern.vm_guest")
        info = {
            'cpu_model': get_sysctl("hw.model"),
            'cpu_cores': get_sysctl("hw.ncpu"),
            'cpu_clockrate': get_sysctl("hw.clockrate"),
            'memory_size': get_sysctl("hw.physmem"),
        }
        # sysctl reports the literal string 'none' when not virtualized.
        info['vm_guest'] = guest if guest != 'none' else None
        return info

    @accepts()
    @returns(str)
    def host_uuid(self):
        """Return the kernel host UUID without its trailing character.

        NOTE(review): the slice presumably strips a trailing NUL/newline
        from the sysctl value — confirm against get_sysctl's contract.
        """
        uuid = get_sysctl("kern.hostuuid")
        return uuid[:-1]
예제 #5
0
logger = logging.getLogger('IPFSPlugin')

# Declarative table of IPFS task wrappers: each entry maps a wrapped IPFS
# call to its early description, its accepted-argument schema, and the
# positional argument names. Presumably consumed elsewhere in this plugin
# to generate task classes — TODO confirm.
ipfs_tasks = {
    'add': {
        'early_describe': 'Calling IPFS add',
        'accepts': (h.one_of(str, h.array(str)), bool),
        'args': ('files', 'recursive')
    },
    'get': {
        'early_describe': 'Calling IPFS get',
        'accepts': (str, h.one_of(str, None)),
        'args': ('multihash', 'filepath')
    },
    'add_json': {
        'early_describe': 'Calling IPFS add json',
        'accepts': (h.object(),),
        'args': ('json_obj',)
    }

}

# Filled in elsewhere with the task types generated from ipfs_tasks.
ipfs_task_types = {}

ipfs_rpcs = {
    'id': {
        'accepts': ()
    },
    'version': {
        'accepts': ()
    },
    'swarm_peers': {
예제 #6
0
            if not self.datastore.exists('groups', ('gid', '=', i)):
                gid = i
                break

        if not gid:
            raise RpcException(errno.ENOSPC, 'No free GIDs available')

        return gid


@description("Create an user in the system")
@accepts(h.all_of(
    h.ref('user'),
    h.required('username'),
    h.forbidden('builtin'),
    h.object(properties={'password': {'type': ['string', 'null']}}),
    # Exactly one credential form must be supplied.
    h.any_of(
        h.required('password'),
        h.required('unixhash', 'nthash'),
        h.required('password_disabled')
    )
))
class UserCreateTask(Task):
    def __init__(self, dispatcher, datastore):
        super(UserCreateTask, self).__init__(dispatcher, datastore)
        # id of the user being created (presumably assigned later by the
        # task body, which is not visible here).
        self.id = None
        # True when the task itself had to create the user's primary group.
        self.created_group = False

    @classmethod
    def early_describe(cls):
        """Short description shown before task arguments are known."""
        return "Creating user"
예제 #7
0
                    if action == 'reload' and i not in reload_scripts:
                        continue

                    system("/usr/sbin/service", i, 'one' + action)

        except SubprocessException as e:
            raise TaskException(errno.EBUSY, e.err)

        self.dispatcher.dispatch_event('services.changed', {
            'operation': 'update',
            'ids': [service['id']]
        })


@description("Updates configuration for services")
@accepts(str, h.object())
class UpdateServiceConfigTask(Task):
    def describe(self, service, updated_fields):
        return "Updating configuration for service {0}".format(service)

    def verify(self, service, updated_fields):
        if not self.datastore.exists('service_definitions', ('name', '=', service)):
            raise VerifyException(
                errno.ENOENT,
                'Service {0} not found'.format(service))
        for x in updated_fields:
            if not self.configstore.exists(
                    'service.{0}.{1}'.format(service, x)):
                raise VerifyException(
                    errno.ENOENT,
                    'Service {0} does not have the following key: {1}'.format(
예제 #8
0
            if not self.datastore.exists('groups', ('id', '=', i)):
                gid = i
                break

        if not gid:
            raise RpcException(errno.ENOSPC, 'No free GIDs available')

        return gid


@description("Create an user in the system")
@accepts(h.all_of(
    h.ref('user'),
    h.required('username'),
    h.forbidden('builtin'),
    h.object({'password': {'type': 'string'}}),
    h.any_of(
        h.required('password'),
        h.required('unixhash', 'smbhash'),
        h.required('password_disabled')
    )
))
class UserCreateTask(Task):
    def describe(self, user):
        return "Adding user {0}".format(user['username'])

    def verify(self, user):
        errors = []

        for code, message in check_unixname(user['username']):
            errors.append(('name', code, message))
def _init(dispatcher, plugin):
    """Plugin entry point: registers update schemas, the update provider,
    task handlers, event types and resources, and schedules the nightly
    update check once the scheduler service resumes."""

    def nightly_update_check(args):
        # Only react once the scheduler service itself has resumed.
        if args.get('name') != 'scheduler.management':
            return

        logger.debug('Scheduling a nightly update check task')
        # Reuse an existing 'update.checkfetch' calendar task when present,
        # otherwise start from an empty schedule.
        caltask = dispatcher.call_sync(
            'calendar_task.query', [('name', '=', 'update.checkfetch')], {'single': True}
        ) or {'schedule': {}}

        caltask.update({
            'name': 'update.checkfetch',
            'args': [True],
            'hidden': True,
            'protected': True,
            'description': 'Nightly update check',
        })
        # Randomize the check time (01:00–06:59) to spread load on the
        # update servers.
        caltask['schedule'].update({
            'hour': str(random.randint(1, 6)),
            'minute': str(random.randint(0, 59)),
        })

        if caltask.get('id'):
            dispatcher.call_task_sync('calendar_task.update', caltask['id'], caltask)
        else:
            dispatcher.call_task_sync('calendar_task.create', caltask)

    # Register Schemas
    plugin.register_schema_definition('update', {
        'type': 'object',
        'properties': {
            'train': {'type': 'string'},
            'check_auto': {'type': 'boolean'},
            'update_server': {'type': 'string', 'readOnly': True},
        },
    })

    plugin.register_schema_definition('update-progress', h.object(properties={
        'operation': h.enum(str, ['DOWNLOADING', 'INSTALLING']),
        'details': str,
        'indeterminate': bool,
        'percent': int,
        'reboot': bool,
        'pkg_name': str,
        'pkg_version': str,
        'filesize': int,
        'num_files_done': int,
        'num_files_total': int,
        'error': bool,
        'finished': bool,
    }))

    plugin.register_schema_definition('update-ops', {
        'type': 'object',
        'properties': {
            'operation': {
                'type': 'string',
                'enum': ['delete', 'install', 'upgrade']
            },
            'new_name': {'type': ['string', 'null']},
            'new_version': {'type': ['string', 'null']},
            'previous_name': {'type': ['string', 'null']},
            'previous_version': {'type': ['string', 'null']},
        }
    })

    plugin.register_schema_definition('update-info', {
        'type': 'object',
        'properties': {
            'notes': {'type': 'object'},
            'notice': {'type': 'string'},
            'changelog': {
                'type': 'array',
                'items': {'type': 'string'},
            },
            'operations': {'$ref': 'update-ops'},
            'downloaded': {'type': 'boolean'},
        }
    })

    plugin.register_schema_definition('update-train', {
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'description': {'type': 'string'},
            'sequence': {'type': 'string'},
            'current': {'type': 'boolean'},
        }
    })

    # Register providers
    plugin.register_provider("update", UpdateProvider)

    # Register task handlers
    plugin.register_task_handler("update.update", UpdateConfigureTask)
    plugin.register_task_handler("update.check", CheckUpdateTask)
    plugin.register_task_handler("update.download", DownloadUpdateTask)
    plugin.register_task_handler("update.manual", UpdateManualTask)
    plugin.register_task_handler("update.apply", UpdateApplyTask)
    plugin.register_task_handler("update.verify", UpdateVerifyTask)
    plugin.register_task_handler("update.checkfetch", CheckFetchUpdateTask)
    plugin.register_task_handler("update.updatenow", UpdateNowTask)

    # Register Event Types
    plugin.register_event_type('update.in_progress', schema=h.ref('update-progress'))
    plugin.register_event_type('update.changed')

    # Register resources
    plugin.register_resource(Resource(update_resource_string), ['system'])

    # Get the Update Cache (if any) at system boot (and hence in init here)
    # Do this in parallel so that a failed cache generation does not take the
    # entire dispatcher start/restart with it (See Ticket: #12892)
    gevent.spawn(generate_update_cache, dispatcher)

    # Schedule a task to check/download for updates
    plugin.register_event_handler('plugin.service_resume', nightly_update_check)
예제 #10
0
        except RpcException as e:
            raise TaskException(errno.ENXIO, 'Cannot generate certificate: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('crypto.certificates.changed', {
            'operation': 'create',
            'ids': [pkey]
        })

        return pkey


@accepts(h.object({
    'properties': {
        'name': {'type': 'string'},
        'certificate': {'type': 'string'},
        'privatekey': {'type': 'string'},
        'passphrase': {'type': 'string'},
    },
    'additionalProperties': False,
    'required': ['name', 'certificate', 'privatekey', 'passphrase'],
}))
class CertificateImportTask(Task):
    # Imports an existing certificate/private-key pair into the datastore.
    def verify(self, certificate):
        """Check name uniqueness and that the private key can be decrypted.

        Raises VerifyException(EEXIST) for a duplicate certificate name and
        VerifyException(EINVAL) when the private key fails to load.
        """
        if self.datastore.exists('crypto.certificates', ('name', '=', certificate['name'])):
            raise VerifyException(errno.EEXIST, 'Certificate with given name already exists')

        try:
            load_privatekey(certificate['privatekey'], certificate.get('passphrase'))
        except Exception:
            # NOTE(review): every load failure is reported as a bad
            # passphrase, which may mislead for malformed key material.
            raise VerifyException(errno.EINVAL, 'Invalid passphrase')
예제 #11
0
            if not self.datastore.exists('groups', ('gid', '=', i)):
                gid = i
                break

        if not gid:
            raise RpcException(errno.ENOSPC, 'No free GIDs available')

        return gid


@description("Create an user in the system")
@accepts(h.all_of(
    h.ref('User'),
    h.required('username'),
    h.forbidden('builtin'),
    # NOTE(review): 'password' is not a standard JSON-Schema type; other
    # variants of this schema in the codebase use 'string'. Confirm the
    # schema library defines a custom 'password' type before relying on it.
    h.object(properties={'password': {'type': ['password', 'null']}}),
    # Exactly one credential form must be supplied.
    h.any_of(
        h.required('password'),
        h.required('unixhash', 'nthash'),
        h.required('password_disabled')
    )
))
class UserCreateTask(Task):
    def __init__(self, dispatcher):
        super(UserCreateTask, self).__init__(dispatcher)
        # id of the user being created; None until assigned by the task body.
        self.id = None
        # True when the task itself had to create the user's primary group.
        self.created_group = False

    @classmethod
    def early_describe(cls):
        """Short description shown before task arguments are known."""
        return "Creating user"
예제 #12
0
logger = logging.getLogger('IPFSPlugin')

# Declarative table of IPFS task wrappers: each entry maps a wrapped IPFS
# call to its early description, its accepted-argument schema, and the
# positional argument names. Presumably consumed elsewhere in this plugin
# to generate task classes — TODO confirm.
ipfs_tasks = {
    'add': {
        'early_describe': 'Calling IPFS add',
        'accepts': (h.one_of(str, h.array(str)), bool),
        'args': ('files', 'recursive')
    },
    'get': {
        'early_describe': 'Calling IPFS get',
        'accepts': (str, h.one_of(str, None)),
        'args': ('multihash', 'filepath')
    },
    'add_json': {
        'early_describe': 'Calling IPFS add json',
        'accepts': (h.object(),),
        'args': ('json_obj',)
    }

}

# Filled in elsewhere with the task types generated from ipfs_tasks.
ipfs_task_types = {}

ipfs_rpcs = {
    'id': {
        'accepts': ()
    },
    'version': {
        'accepts': ()
    },
    'swarm_peers': {
예제 #13
0
class TestProvider(Provider):
    @generator
    def stream(self, count=10):
        """Yield ``count`` test records; every record shares id == 1."""
        n = 0
        while n < count:
            yield {
                "id": 1,
                "value": "{0} bottles of beer on the wall".format(n)
            }
            n += 1

    @generator
    def wrapped_stream(self, count=10):
        # Delegates to the 'test.stream' RPC via the dispatcher, so the
        # generator result is passed through another call layer.
        return self.dispatcher.call_sync('test.stream', count)

    def sleep(self, n):
        """Block for ``n`` seconds, then return 'done' (test helper)."""
        time.sleep(n)
        return 'done'

    def rpcerror(self):
        """Unconditionally raise an RpcException carrying an extra payload
        (test helper for error propagation)."""
        raise RpcException(errno.EINVAL, 'Testing if parameter',
                           'This is in the extra paramaeter')

    def lazy_query(self, filter=None, params=None):
        """Query ten synthetic {'id': n} rows, attaching one eager field and
        two lazily-evaluated (deliberately slow) fields to each row."""
        def extend(obj):
            def slow_scalar():
                # Simulated expensive scalar computation.
                time.sleep(1)
                return 'I am so slow: {0}'.format(obj['id'])

            def slow_composite():
                # Simulated expensive composite computation.
                time.sleep(1)
                return {'foo': obj['id'] + 1, 'bar': obj['id'] + 2}

            obj['fast_value'] = obj['id'] * 5
            obj['slow_value'] = lazy(slow_scalar)
            obj['composite_slow_value'] = lazy(slow_composite)
            return obj

        rows = ({'id': n} for n in range(0, 10))
        return q.query(rows, *(filter or []), callback=extend, **(params or {}))

    def exclude_string(self):
        # 'exclude' given as a bare string: drops the slow_value field.
        return self.lazy_query([], {'exclude': 'slow_value'})

    def exclude_tuple(self):
        # 'exclude' as a tuple, including a nested path into the composite
        # lazy value.
        return self.lazy_query(
            [], {'exclude': ('slow_value', 'composite_slow_value.foo')})

    def exclude_lazy(self):
        # Excludes both lazy fields wholesale.
        return self.lazy_query(
            [], {'exclude': ('slow_value', 'composite_slow_value')})

    def attr_query(self):
        """Exercise q.set/get/contains/delete on both attribute and dict
        targets; return True when every accessor behaves as expected.
        """
        class Test(object):
            pass

        c = Test()
        d = {}
        q.set(c, 'f', True)
        q.set(d, 'f2', Test())
        q.set(d, 'f2.nested', True)

        if q.get(c, 'f') and q.get(d, 'f2.nested') and isinstance(
                q.get(d, 'f2'), Test):
            # Renamed from the ambiguous single-letter 'l' (PEP 8 / E741).
            objs = [d, c]
            if q.contains(c, 'f'):
                q.delete(c, 'f')

                # After deleting c's 'f', only d can match the nested filter.
                return bool(q.query(objs, ('f2.nested', '=', True), count=True))

        return False

    @accepts(str, h.one_of(h.object(), None))
    def serviced_message(self, msg, extra=None):
        """Best-effort push of a status message to serviced.

        Failures are intentionally ignored, but the original bare
        'except:' also swallowed SystemExit/KeyboardInterrupt; narrowed
        to Exception.
        """
        if extra is None:
            extra = {}
        try:
            push_status(msg, extra=extra)
        except Exception:
            pass

    def nested_fd(self, data):
        """Write a line through a FileDescriptor nested inside ``data``.

        The descriptor is closed by the ``with`` block via os.fdopen.
        """
        fd = data['file_descriptor']
        assert isinstance(fd, FileDescriptor)
        with os.fdopen(fd.fd, 'wb') as f:
            f.write(b'hello\n')
            if not self.datastore.exists('groups', ('gid', '=', i)):
                gid = i
                break

        if not gid:
            raise RpcException(errno.ENOSPC, 'No free GIDs available')

        return gid


@description("Create an user in the system")
@accepts(h.all_of(
    h.ref('user'),
    h.required('username'),
    h.forbidden('builtin'),
    h.object(properties={'password': {'type': 'string'}}),
    # Exactly one credential form must be supplied.
    h.any_of(
        h.required('password'),
        h.required('unixhash', 'smbhash'),
        h.required('password_disabled')
    )
))
class UserCreateTask(Task):
    def __init__(self, dispatcher, datastore):
        super(UserCreateTask, self).__init__(dispatcher, datastore)
        # uid of the user being created; None until assigned by the task body.
        self.uid = None
        # True when the task itself had to create the user's primary group.
        self.created_group = False

    def describe(self, user):
        """Human-readable description shown once arguments are known."""
        return "Adding user {0}".format(user['username'])
예제 #15
0
                'name': name,
                'type': type,
                'mountpoint': mountpoint,
                'topology': volume['topology'],
                'attributes': volume.get('attributes', {})
            })

        self.set_progress(90)
        self.dispatcher.dispatch_event('volumes.changed', {
            'operation': 'create',
            'ids': [id]
        })


@description("Creates new volume and automatically guesses disks layout")
@accepts(str, str, h.array(str), h.object())
class VolumeAutoCreateTask(Task):
    def verify(self, name, type, disks, params=None):
        """Reject duplicate volume names; claim one disk resource per member.

        NOTE: 'type' shadows the builtin — kept for interface compatibility.
        """
        if self.datastore.exists('volumes', ('name', '=', name)):
            raise VerifyException(errno.EEXIST,
                                  'Volume with same name already exists')

        return ['disk:{0}'.format(os.path.join('/dev', i)) for i in disks]

    def run(self, name, type, disks, params=None):
        vdevs = []
        if len(disks) % 3 == 0:
            for i in range(0, len(disks), 3):
                vdevs.append({
                    'type': 'raidz1',
                    'children': [{'type': 'disk', 'path': os.path.join('/dev', i)} for i in disks[i:i+3]]
예제 #16
0
                trains = list(trains.keys())
            if train_to_set not in trains:
                raise TaskException(
                    errno.ENOENT,
                    '{0} is not a valid train'.format(train_to_set))
            self.configstore.set('update.train', train_to_set)
        if 'check_auto' in props:
            self.configstore.set('update.check_auto', props.get('check_auto'))
        self.dispatcher.dispatch_event('update.changed',
                                       {'operation': 'update'})


@description("Checks for Available Updates and returns if update is available "
             "and if yes returns information on operations that will be "
             "performed during the update")
@accepts(h.object(properties={'check_now': bool}))
class CheckUpdateTask(Task):
    @classmethod
    def early_describe(cls):
        return "Checking for updates"

    def describe(self, conditions=None):
        return TaskDescription("Checking for updates")

    def verify(self, conditions=None):
        # TODO: Fix this verify's resource allocation as unique task
        block = self.dispatcher.resource_graph.get_resource(
            update_resource_string)
        if block is not None and block.busy:
            raise VerifyException(
                errno.EBUSY,
                raise TaskException(errno.ENOENT, '{0} is not a valid train'.format(train_to_set))
            self.configstore.set('update.train', train_to_set)
        if 'check_auto' in props:
            self.configstore.set('update.check_auto', props.get('check_auto'))
        self.dispatcher.dispatch_event('update.changed', {
            'operation': 'update',
        })


@description((
    "Checks for Available Updates and returns if update is available "
    "and if yes returns information on operations that will be "
    "performed during the update"
))
@accepts(h.object(properties={
            'check_now': bool,
            }))
class CheckUpdateTask(Task):
    def describe(self):
        return "Checks for Updates and Reports Operations to be performed"

    def verify(self, conditions=None):
        """Ensure no other update operation is queued.

        ``conditions`` is accepted for interface compatibility but not used.
        Fixes: the mutable default ({}) is replaced by None, the typo
        'availabe' in the description, and the missing space in the
        concatenated EBUSY message ('ApplyingtheUpdates').
        """
        # TODO: Fix this verify's resource allocation as unique task
        block = self.dispatcher.resource_graph.get_resource(update_resource_string)
        if block is not None and block.busy:
            raise VerifyException(errno.EBUSY, (
                'An Update Operation (Configuration/ Download/ Applying '
                'the Updates) is already in the queue, please retry later'
            ))

        return [update_resource_string]
예제 #18
0
class UpdateProvider(Provider):
    @accepts()
    @returns(str)
    def is_update_available(self):
        """Return the cached update-availability flag.

        Raises RpcException(EBUSY) while the cache entry is invalidated,
        i.e. an update check may currently be repopulating it.
        """
        temp_available = update_cache.get('available', timeout=1)
        if temp_available is not None:
            return temp_available
        elif update_cache.is_valid('available'):
            # Cache is valid but holds None — report that as-is.
            return temp_available
        else:
            raise RpcException(
                errno.EBUSY,
                ('Update Availability flag is invalidated, an Update Check'
                 ' might be underway. Try again in some time.'))

    @accepts()
    @returns(h.array(str))
    def obtain_changelog(self):
        """Return the cached changelog list, or raise EBUSY while the cache
        entry is invalidated."""
        temp_changelog = update_cache.get('changelog', timeout=1)
        if temp_changelog is not None:
            return temp_changelog
        elif update_cache.is_valid('changelog'):
            return temp_changelog
        else:
            raise RpcException(
                errno.EBUSY, ('Changelog list is invalidated, an Update Check '
                              'might be underway. Try again in some time.'))

    @accepts()
    @returns(h.array(h.ref('update-ops')))
    def get_update_ops(self):
        """Return the cached list of pending update operations, or raise
        EBUSY while the cache entry is invalidated."""
        temp_operations = update_cache.get('operations', timeout=1)
        if temp_operations is not None:
            return temp_operations
        elif update_cache.is_valid('operations'):
            return temp_operations
        else:
            raise RpcException(
                errno.EBUSY,
                ('Update Operations Dict is invalidated, an Update Check '
                 'might be underway. Try again in some time.'))

    @accepts()
    @returns(h.ref('update-info'))
    def update_info(self):
        """Return a dict of all cached update-info fields.

        Raises EBUSY when the 'available' flag is invalidated (taken as a
        proxy for the whole cache being mid-refresh).
        """
        if not update_cache.is_valid('available'):
            raise RpcException(
                errno.EBUSY,
                ('Update Availability flag is invalidated, an Update Check'
                 ' might be underway. Try again in some time.'))
        info_item_list = [
            'available', 'changelog', 'notes', 'notice', 'operations',
            'downloaded', 'version', 'installed', 'installed_version'
        ]
        return {
            key: update_cache.get(key, timeout=1)
            for key in info_item_list
        }

    @returns(h.any_of(
        h.array(h.ref('update-train')),
        None,
    ))
    def trains(self):
        """Return the list of available update trains, flagging the one the
        system is currently configured to follow. Returns None when the
        train list cannot be fetched (e.g. no network)."""
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()
        trains = conf.AvailableTrains()

        if trains is None:
            logger.debug(
                'The AvailableTrains call returned None. Check your network connection'
            )
            return None
        seltrain = self.dispatcher.configstore.get('update.train')

        data = []
        for name in list(trains.keys()):
            # Prefer the cached Train object when the configuration has one.
            if name in conf._trains:
                train = conf._trains.get(name)
            else:
                train = Train.Train(name)
            data.append({
                'name': train.Name(),
                'description': train.Description(),
                'sequence': train.LastSequence(),
                'current': True if name == seltrain else False,
            })
        return data

    @accepts()
    @returns(str)
    def get_current_train(self):
        """Return the name of the train the system is currently on."""
        conf = Configuration.Configuration()
        conf.LoadTrainsConfig()
        return conf.CurrentTrain()

    @accepts()
    @returns(h.ref('update'))
    def get_config(self):
        """Return the current update configuration (train, auto-check flag,
        update server URL)."""
        return {
            'train': self.dispatcher.configstore.get('update.train'),
            'check_auto': self.dispatcher.configstore.get('update.check_auto'),
            'update_server': Configuration.Configuration().UpdateServerURL(),
        }

    @private
    @accepts(h.array(str))
    def update_cache_invalidate(self, value_list):
        """Invalidate the named update-cache entries."""
        for item in value_list:
            update_cache.invalidate(item)

    @private
    @accepts(h.object())
    def update_cache_putter(self, value_dict):
        """Store key/value pairs into the update cache and broadcast an
        update.update_info.updated event."""
        for key, value in value_dict.items():
            update_cache.put(key, value)
        self.dispatcher.dispatch_event('update.update_info.updated',
                                       {'operation': 'update'})

    @private
    @accepts(str)
    @returns(h.any_of(None, str, bool, h.array(str)))
    def update_cache_getter(self, key):
        """Return the cached value for ``key`` (None when absent/expired)."""
        return update_cache.get(key, timeout=1)

    @private
    @accepts(str, str, h.any_of(None, h.object(additionalProperties=True)))
    def update_alert_set(self, update_class, update_version, kwargs=None):
        """Emit an update alert of ``update_class`` for ``update_version``,
        cancelling any stale alerts it supersedes.

        Raises RpcException(EINVAL) for an unknown update_class.
        """
        # Formulating a query to find any alerts in the current `update_class`
        # which could be either of ('UpdateAvailable', 'UpdateDownloaded', 'UpdateInstalled')
        # as well as any alerts for the specified update version string.
        # The reason I do this is because say an Update is Downloaded (FreeNAS-10-2016051047)
        # and there is either a previous alert for an older downloaded update OR there is a
        # previous alert for the same version itself but for it being available instead of being
        # downloaded already, both of these previous alerts would need to be cancelled and
        # replaced by 'UpdateDownloaded' for FreeNAS-10-2016051047.
        if kwargs is None:
            kwargs = {}
        existing_update_alerts = self.dispatcher.call_sync(
            'alert.query', [('and', [('active', '=', True),
                                     ('dismissed', '=', False)]),
                            ('or', [('class', '=', update_class),
                                    ('target', '=', update_version)])])
        title = UPDATE_ALERT_TITLE_MAP.get(update_class, 'Update Alert')
        desc = kwargs.get('desc')
        if desc is None:
            # Build a default description appropriate to the alert class.
            if update_class == 'UpdateAvailable':
                desc = 'Latest Update: {0} is available for download'.format(
                    update_version)
            elif update_class == 'UpdateDownloaded':
                desc = 'Update containing {0} is downloaded and ready for install'.format(
                    update_version)
            elif update_class == 'UpdateInstalled':
                update_installed_bootenv = kwargs.get(
                    'update_installed_bootenv')
                if update_installed_bootenv and not update_installed_bootenv[
                        0]['on_reboot']:
                    desc = 'Update containing {0} is installed.'.format(
                        update_version)
                    desc += ' Please activate {0} and Reboot to use this updated version'.format(
                        update_installed_bootenv[0]['realname'])
                else:
                    desc = 'Update containing {0} is installed and activated for next boot'.format(
                        update_version)
            else:
                # what state is this?
                raise RpcException(
                    errno.EINVAL,
                    'Unknown update alert class: {0}'.format(update_class))
        alert_payload = {
            'class': update_class,
            'title': title,
            'target': update_version,
            'description': desc
        }

        alert_exists = False
        # Purposely deleting stale alerts later on since if anything (in constructing the payload)
        # above this fails the exception prevents alert.cancel from being called.
        for update_alert in existing_update_alerts:
            if (update_alert['class'] == update_class
                    and update_alert["target"] == update_version
                    and update_alert["description"] == desc):
                alert_exists = True
                continue
            self.dispatcher.call_sync('alert.cancel', update_alert['id'])

        if not alert_exists:
            self.dispatcher.call_sync('alert.emit', alert_payload)
@description("Provides access to configuration store")
class ConfigProvider(Provider):
    @private
    @accepts(str)
    @returns((str, int, bool, None))
    def get(self, key):
        """Return the configuration value stored under ``key``."""
        return self.dispatcher.configstore.get(key)

    @private
    def list(self, root):
        """Return the children of ``root`` in the configuration store."""
        return self.dispatcher.configstore.list_children(root)


@description("Updates configuration settings")
@accepts(h.object())
class UpdateConfigTask(Task):
    def verify(self, settings):
        """Claim the global 'system' resource for the duration of the task."""
        return ['system']

    def run(self, settings):
        """Apply each key/value pair from ``settings`` to the config store.

        ``settings`` is a plain object (dict) per the @accepts schema, so
        iterate items(); the previous code indexed each key string with
        i['key'], which is inconsistent with settings.keys() below and
        would raise TypeError for a dict input.
        """
        for key, value in settings.items():
            self.configstore.set(key, value)

        self.dispatcher.dispatch_event('config.changed', {
            'operation': 'update',
            'ids': [list(settings.keys())]
        })


def _init(dispatcher, plugin):
예제 #20
0
        if scrub.state == libzfs.ScanState.SCANNING:
            self.progress = scrub.percentage
            return TaskStatus(self.progress, "In progress...")

        if scrub.state == libzfs.ScanState.CANCELED:
            self.finish_event.set()
            return TaskStatus(self.progress, "Canceled")

        if scrub.state == libzfs.ScanState.FINISHED:
            self.finish_event.set()
            return TaskStatus(100, "Finished")


@description("Creates new ZFS pool")
@accepts(str, h.ref('zfs-topology'), h.object())
class ZpoolCreateTask(Task):
    def __partition_to_disk(self, part):
        """Map a partition name to the base name of its parent disk."""
        result = self.dispatcher.call_sync('disks.get_partition_config', part)
        return os.path.basename(result['disk'])

    def __get_disks(self, topology):
        result = []
        for gname, vdevs in list(topology.items()):
            for vdev in vdevs:
                if vdev['type'] == 'disk':
                    result.append(self.__partition_to_disk(vdev['path']))
                    continue

                if 'children' in vdev:
                    result += [self.__partition_to_disk(i['path']) for i in vdev['children']]
예제 #21
0
class ServiceInfoProvider(Provider):
    """Exposes service definitions and lifecycle control (start, stop,
    reload, restart, apply_state) for launchd-managed and RPC-managed
    services."""

    @description("Lists available services")
    @query("service")
    @generator
    def query(self, filter=None, params=None):
        """Stream service entries; expensive status fields are lazy."""
        def extend(i):
            # Computing status is expensive, so wrap it in one lazy call
            # shared by 'state', 'error' and 'pid'.
            lazy_status = lazy(get_status, self.dispatcher, self.datastore, i)
            entry = {
                'id': i['id'],
                'name': i['name'],
                'dependencies': i.get('dependencies', []),
                'labels': None,
                'state': lazy(lambda: lazy_status()[0]),
                'error': lazy(lambda: lazy_status()[1]),
            }

            if 'launchd' in i:
                # 'launchd' may hold a single plist dict or a list of them.
                launchd = i['launchd']
                jobs = [launchd] if isinstance(launchd, dict) else launchd
                entry['labels'] = [j['Label'] for j in jobs]

            # BUG FIX: a stray trailing comma previously made this value a
            # 1-element tuple wrapping the lazy object.
            entry['pid'] = lazy(lambda: lazy_status()[2])
            entry['builtin'] = i['builtin']
            entry['config'] = lazy(self.get_service_config, i['id'])
            return entry

        return q.query(self.datastore.query_stream('service_definitions',
                                                   callback=extend),
                       *(filter or []),
                       stream=True,
                       **(params or {}))

    @accepts(str)
    @returns(h.object())
    def get_service_config(self, id):
        """Return the configuration dict for service *id*, tagged with a
        'type' key, or None when the service has no configuration.

        Raises RpcException(EINVAL) for an unknown id."""
        svc = self.datastore.get_by_id('service_definitions', id)
        if not svc:
            raise RpcException(errno.EINVAL, 'Invalid service name')

        if svc.get('get_config_rpc'):
            # Service supplies its own config through a dedicated RPC.
            ret = self.dispatcher.call_sync(svc['get_config_rpc'])
        else:
            ret = ConfigNode('service.{0}'.format(svc['name']),
                             self.configstore).__getstate__()

        if not ret:
            return

        return extend_dict(ret, {'type': 'service-{0}'.format(svc['name'])})

    @private
    @accepts(str)
    def ensure_started(self, service):
        """Start *service* via its launchd job(s), or its start_rpc hook."""
        # XXX launchd!
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        if 'launchd' in svc:
            launchd = svc['launchd']
            plists = [launchd] if isinstance(launchd, dict) else launchd

            for i in plists:
                self.dispatcher.call_sync('serviced.job.start', i['Label'])

            return

        if 'start_rpc' in svc:
            try:
                self.dispatcher.call_sync(svc['start_rpc'])
                return True
            except RpcException:
                raise RpcException(
                    errno.ENOENT,
                    "Whilst starting service {0} rpc '{1}' failed".format(
                        service, svc['start_rpc']))

    @private
    @accepts(str)
    def ensure_stopped(self, service):
        """Stop *service* via its launchd job(s), or its stop_rpc hook."""
        # XXX launchd!
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        if 'launchd' in svc:
            launchd = svc['launchd']
            plists = [launchd] if isinstance(launchd, dict) else launchd

            for i in plists:
                self.dispatcher.call_sync('serviced.job.stop', i['Label'])

            return

        if 'stop_rpc' in svc:
            try:
                self.dispatcher.call_sync(svc['stop_rpc'])
                return True
            except RpcException:
                # BUG FIX: message previously said "starting".
                raise RpcException(
                    errno.ENOENT,
                    "Whilst stopping service {0} rpc '{1}' failed".format(
                        service, svc['stop_rpc']))

    @private
    @accepts(str)
    def reload(self, service):
        """SIGHUP the service's processes (dependencies first) if running."""
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        # BUG FIX: validate *svc* before computing its status; previously
        # get_status() was called with None for an unknown service.
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        status, _, pid = get_status(self.dispatcher, self.datastore, svc)
        if status != 'RUNNING':
            return

        if svc.get('dependencies'):
            for i in self.datastore.query('service_definitions',
                                          ('id', 'in', svc['dependencies'])):
                self.reload(i['name'])

        if pid:
            for p in pid:
                try:
                    os.kill(p, signal.SIGHUP)
                except ProcessLookupError:
                    # Process exited between status probe and signal.
                    continue

    @private
    @accepts(str)
    def restart(self, service):
        """Restart *service* via restart_rpc hook or its launchd job(s)."""
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        # BUG FIX: validate *svc* before computing its status.
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        status, _, _ = get_status(self.dispatcher, self.datastore, svc)
        if status != 'RUNNING':
            return

        hook_rpc = svc.get('restart_rpc')
        if hook_rpc:
            try:
                self.dispatcher.call_sync(hook_rpc, timeout=300)
            except RpcException:
                # Best effort: hook failures are deliberately ignored.
                pass
            return

        if 'launchd' in svc:
            launchd = svc['launchd']
            plists = [launchd] if isinstance(launchd, dict) else launchd

            for i in plists:
                self.dispatcher.call_sync('serviced.job.restart',
                                          i['Label'],
                                          timeout=300)

            return

    @private
    @accepts(str, bool, bool)
    def apply_state(self, service, restart=False, reload=False):
        """Reconcile the runtime state of *service* with its configured
        'enable' flag; when already in the desired state, optionally
        restart or reload it."""
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        state, _, pid = get_status(self.dispatcher, self.datastore, svc)
        node = ConfigNode('service.{0}'.format(service), self.configstore)

        if node['enable'].value and state != 'RUNNING':
            logger.info('Starting service {0}'.format(service))
            self.dispatcher.call_sync('service.ensure_started',
                                      service,
                                      timeout=120)

        elif not node['enable'].value and state != 'STOPPED':
            logger.info('Stopping service {0}'.format(service))
            self.dispatcher.call_sync('service.ensure_stopped',
                                      service,
                                      timeout=120)

        else:
            if restart:
                logger.info('Restarting service {0}'.format(service))
                self.dispatcher.call_sync('service.restart',
                                          service,
                                          timeout=120)
            elif reload:
                logger.info('Reloading service {0}'.format(service))
                self.dispatcher.call_sync('service.reload',
                                          service,
                                          timeout=120)
# NOTE(review): extraction artifact ("예제 #22") removed; an alternate rc.d-based ServiceInfoProvider variant follows
class ServiceInfoProvider(Provider):
    """rc.d-based variant: exposes service definitions and controls them
    through `/usr/sbin/service <script> one{start,stop,reload,restart}`
    or per-service RPC hooks."""

    @description("Lists available services")
    @query("service")
    @generator
    def query(self, filter=None, params=None):
        """Return service entries with their live status and config."""
        def extend(i):
            state, pid = get_status(self.dispatcher, self.datastore, i)
            entry = {
                'id': i['id'],
                'name': i['name'],
                'state': state,
            }

            if pid is not None:
                entry['pid'] = pid

            entry['builtin'] = i['builtin']
            return entry

        # Running extend sequentially might take too long due to the number of services
        # and `service ${name} onestatus`. To workaround that run it in parallel using gevent
        entries = self.datastore.query('service_definitions', *(filter or []))
        if entries is None:
            return entries
        jobs = {gevent.spawn(extend, entry): entry for entry in entries}
        gevent.joinall(list(jobs.keys()), timeout=15)
        group = gevent.pool.Group()

        def collect(greenlet):
            if greenlet.value is None:
                # Status probe failed or timed out: report UNKNOWN state.
                entry = jobs.get(greenlet)
                return {
                    # BUG FIX: 'id' was missing from this fallback dict,
                    # so the get_service_config(s['id']) call below raised
                    # KeyError whenever a probe timed out.
                    'id': entry['id'],
                    'name': entry['name'],
                    'state': 'UNKNOWN',
                    'builtin': entry['builtin'],
                }
            else:
                return greenlet.value

        # NOTE: renamed from `result` to avoid shadowing the query result.
        result = group.map(collect, jobs)
        result = list(
            map(
                lambda s: extend_dict(
                    s, {'config': self.get_service_config(s['id'])}), result))
        return q.query(result, *(filter or []), stream=True, **(params or {}))

    @accepts(str)
    @returns(h.object())
    def get_service_config(self, id):
        """Return the configuration dict for service *id*, tagged with a
        'type' key, or None when the service has no configuration.

        Raises RpcException(EINVAL) for an unknown id."""
        svc = self.datastore.get_by_id('service_definitions', id)
        if not svc:
            raise RpcException(errno.EINVAL, 'Invalid service name')

        if svc.get('get_config_rpc'):
            # Service supplies its own config through a dedicated RPC.
            ret = self.dispatcher.call_sync(svc['get_config_rpc'])
        else:
            ret = ConfigNode('service.{0}'.format(svc['name']),
                             self.configstore).__getstate__()

        if not ret:
            return

        return extend_dict(ret, {'type': 'service-{0}'.format(svc['name'])})

    @private
    @accepts(str)
    def ensure_started(self, service):
        """Start *service* via its rc script(s) or its start_rpc hook."""
        # XXX launchd!
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        if 'rcng' not in svc:
            if 'start_rpc' in svc:
                try:
                    self.dispatcher.call_sync(svc['start_rpc'])
                    return True
                except RpcException:
                    raise RpcException(
                        errno.ENOENT,
                        "Whilst starting service {0} rpc '{1}' failed".format(
                            service, svc['start_rpc']))
            else:
                return

        rc_scripts = svc['rcng']['rc-scripts']

        # 'rc-scripts' is either one script name or a list of them.
        if isinstance(rc_scripts, str):
            try:
                system("/usr/sbin/service", rc_scripts, 'onestart')
            except SubprocessException as e:
                raise RpcException(
                    errno.ENOENT,
                    "Whilst starting service {0} command 'service {1} onestart'"
                    .format(service, rc_scripts) +
                    " failed with error: {0}".format(e.err))
        elif isinstance(rc_scripts, list):
            for i in rc_scripts:
                try:
                    system("/usr/sbin/service", i, 'onestart')
                except SubprocessException as e:
                    raise RpcException(
                        errno.ENOENT,
                        "Whilst starting service {0} command 'service {1} onestart'"
                        .format(service, i) +
                        " failed the following error occured: {0}".format(
                            e.err))

    @private
    @accepts(str)
    def ensure_stopped(self, service):
        """Stop *service* via its rc script(s) or its stop_rpc hook."""
        # XXX launchd!
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        if 'rcng' not in svc:
            if 'stop_rpc' in svc:
                try:
                    self.dispatcher.call_sync(svc['stop_rpc'])
                    return True
                except RpcException:
                    # BUG FIX: message previously said "starting".
                    raise RpcException(
                        errno.ENOENT,
                        "Whilst stopping service {0} rpc '{1}' failed".format(
                            service, svc['stop_rpc']))
            else:
                return

        rc_scripts = svc['rcng']['rc-scripts']

        if isinstance(rc_scripts, str):
            try:
                system("/usr/sbin/service", rc_scripts, 'onestop')
            except SubprocessException as e:
                raise RpcException(
                    errno.ENOENT,
                    "Whilst stopping service {0} command 'service {1} onestop'"
                    .format(service, rc_scripts) +
                    " failed with error: {0}".format(e.err))

        elif isinstance(rc_scripts, list):
            for i in rc_scripts:
                try:
                    system("/usr/sbin/service", i, 'onestop')
                except SubprocessException as e:
                    raise RpcException(
                        errno.ENOENT,
                        "Whilst stopping service {0} command 'service {1} onestop'"
                        .format(service, i) +
                        " failed the following error occured: {0}".format(
                            e.err))

    @private
    @accepts(str)
    def reload(self, service):
        """Reload *service* via 'service <script> onereload' if running."""
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        status = self.query([('name', '=', service)], {'single': True})
        if status['state'] != 'RUNNING':
            return

        # BUG FIX: services without an 'rcng' section previously raised
        # KeyError here; they have nothing to reload via rc scripts.
        if 'rcng' not in svc:
            return

        rc_scripts = svc['rcng']['rc-scripts']
        reload_scripts = svc['rcng'].get('reload', rc_scripts)

        if isinstance(rc_scripts, str):
            try:
                system("/usr/sbin/service", rc_scripts, 'onereload')
            except SubprocessException as e:
                raise RpcException(
                    errno.ENOENT,
                    "Whilst reloading service {0} command 'service {1} onereload'"
                    .format(service, rc_scripts) +
                    " failed with error: {0}".format(e.err))

        elif isinstance(rc_scripts, list):
            for i in rc_scripts:
                # Only scripts listed as reloadable get a onereload.
                if i not in reload_scripts:
                    continue

                try:
                    system("/usr/sbin/service", i, 'onereload')
                except SubprocessException as e:
                    raise RpcException(
                        errno.ENOENT,
                        "Whilst reloading service {0} command 'service {1} onereload'"
                        .format(service, i) +
                        " failed the following error occured: {0}".format(
                            e.err))

    @private
    @accepts(str)
    def restart(self, service):
        """Restart *service* via restart_rpc hook or its rc script(s)."""
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        status = self.query([('name', '=', service)], {'single': True})
        if status['state'] != 'RUNNING':
            return

        hook_rpc = svc.get('restart_rpc')
        if hook_rpc:
            try:
                self.dispatcher.call_sync(hook_rpc)
            except RpcException:
                # Best effort: hook failures are deliberately ignored.
                pass
            return

        # BUG FIX: guard missing 'rcng' section (consistent with
        # ensure_started/ensure_stopped) instead of raising KeyError.
        if 'rcng' not in svc:
            return

        rc_scripts = svc['rcng']['rc-scripts']

        if isinstance(rc_scripts, str):
            try:
                system("/usr/sbin/service", rc_scripts, 'onerestart')
            except SubprocessException as e:
                raise RpcException(
                    errno.ENOENT,
                    "Whilst restarting service {0} command 'service {1} onerestart'"
                    .format(service, rc_scripts) +
                    " failed with error: {0}".format(e.err))

        elif isinstance(rc_scripts, list):
            for i in rc_scripts:
                try:
                    system("/usr/sbin/service", i, 'onerestart')
                except SubprocessException as e:
                    raise RpcException(
                        errno.ENOENT,
                        "Whilst restarting service {0} command 'service {1} onerestart'"
                        .format(service, i) +
                        " failed the following error occured: {0}".format(
                            e.err))

    @private
    @accepts(str, bool, bool)
    def apply_state(self, service, restart=False, reload=False):
        """Reconcile the runtime state of *service* with its configured
        'enable' flag; when already in the desired state, optionally
        restart or reload it."""
        svc = self.datastore.get_one('service_definitions',
                                     ('name', '=', service))
        if not svc:
            raise RpcException(errno.ENOENT,
                               'Service {0} not found'.format(service))

        state, pid = get_status(self.dispatcher, self.datastore, svc)
        node = ConfigNode('service.{0}'.format(service), self.configstore)

        if node['enable'].value and state != 'RUNNING':
            logger.info('Starting service {0}'.format(service))
            self.dispatcher.call_sync('service.ensure_started',
                                      service,
                                      timeout=120)

        elif not node['enable'].value and state != 'STOPPED':
            logger.info('Stopping service {0}'.format(service))
            self.dispatcher.call_sync('service.ensure_stopped',
                                      service,
                                      timeout=120)

        else:
            if restart:
                logger.info('Restarting service {0}'.format(service))
                self.dispatcher.call_sync('service.restart',
                                          service,
                                          timeout=120)
            elif reload:
                logger.info('Reloading service {0}'.format(service))
                self.dispatcher.call_sync('service.reload',
                                          service,
                                          timeout=120)