def complete(self, context, **kwargs):
    """
    Build shell-completion candidates for docker container creation.

    If an image name is already typed (read from the nested 'kwargs.image'
    key), look the image up — first among live docker images, then among the
    bundled default images — and offer completions derived from its presets
    (settings keys, volume container paths and port/protocol pairs).
    Always appends the generic property completions plus the merged list of
    available image names.
    """
    props = []
    # NOTE(review): kwargs itself appears to contain a 'kwargs' sub-dict —
    # confirm against the caller's argument packing.
    name = q.get(kwargs, 'kwargs.image')
    if name:
        # Prefer a live image; fall back to the static default-image catalog.
        image = context.entity_subscribers['docker.image'].query(('names', 'in', name), single=True)
        if not image:
            image = q.query(DockerImageNamespace.default_images, ('name', '=', name), single=True)
        if image and image['presets']:
            presets = image['presets']
            # Each preset setting becomes a bare 'key=' completion.
            props += [NullComplete('{id}='.format(**i)) for i in presets['settings']]
            props += [NullComplete('volume:{container_path}='.format(**v)) for v in presets['volumes']]
            props += [NullComplete('port:{container_port}/{protocol}='.format(**v)) for v in presets['ports']]
    # Merge default image names with the first name of every live image,
    # de-duplicated (order is not significant for completion).
    available_images = q.query(DockerImageNamespace.default_images, select='name')
    available_images += context.entity_subscribers['docker.image'].query(select='names.0')
    available_images = list(set(available_images))
    return props + [
        NullComplete('name='),
        NullComplete('command='),
        NullComplete('hostname='),
        NullComplete('volume:'),
        NullComplete('port:'),
        EnumComplete('image=', available_images),
        EntitySubscriberComplete('host=', 'docker.host', lambda i: q.get(i, 'name')),
        EnumComplete('interactive=', ['yes', 'no']),
        EnumComplete('autostart=', ['yes', 'no']),
        EnumComplete('expose_ports=', ['yes', 'no']),
    ]
def query(self, *filter, **params):
    """
    Query the locally cached entities, optionally waiting for a match.

    With both 'single' and 'timeout' in params, blocks on the condition
    variable until a matching entity appears or the timeout elapses.
    NOTE(review): Condition.wait_for returns the predicate's result, so on
    timeout this returns the falsy predicate value rather than raising.
    Remote-backed subscribers (or an explicit remote=True) forward the
    query verbatim to the server instead of using the local cache.
    """
    if 'timeout' in params and params.get('single'):
        # timeout must be popped so it is not passed through to q.query.
        timeout = params.pop('timeout')
        with self.cv:
            return self.cv.wait_for(lambda: q.query(list(self.items.values()), *filter, **params), timeout)
    if self.remote or params.get('remote'):
        return self.client.call_sync('{0}.query'.format(self.name), filter, params)
    # Snapshot items under the lock so the query sees a consistent view.
    with self.cv:
        return q.query(list(self.items.values()), *filter, **params)
def query(self, filter=None, params=None):
    """Return kerberos realms known to dscached merged with locally stored ones."""
    discovered = list(self.dispatcher.call_sync('dscached.management.get_realms'))
    stored = self.datastore.query('kerberos.realms')
    return q.query(discovered + stored, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query service definitions, decorating each entry with lazily computed
    state, error, pid and config fields.

    Returns a streamed q.query over the 'service_definitions' datastore
    collection with the optional filter/params applied.
    """
    def extend(i):
        # One shared lazy status probe; state/error/pid each pick a slot.
        lazy_status = lazy(get_status, self.dispatcher, self.datastore, i)
        entry = {
            'id': i['id'],
            'name': i['name'],
            'dependencies': i.get('dependencies', []),
            'labels': None,
            'state': lazy(lambda: lazy_status()[0]),
            'error': lazy(lambda: lazy_status()[1]),
        }
        if 'launchd' in i:
            launchd = i['launchd']
            # launchd may be a single job dict or a list of them.
            jobs = [launchd] if isinstance(launchd, dict) else launchd
            entry['labels'] = [j['Label'] for j in jobs]
        # BUG FIX: the original had a trailing comma here, which made 'pid'
        # a one-element tuple wrapping the lazy proxy instead of the proxy.
        entry['pid'] = lazy(lambda: lazy_status()[2])
        entry['builtin'] = i['builtin']
        entry['config'] = lazy(self.get_service_config, i['id'])
        return entry

    return q.query(self.datastore.query_stream('service_definitions', callback=extend), *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query user accounts through dscached.

    Count queries are unsupported and return None. Single exact-match
    lookups on id/uid/username are short-circuited to the dedicated
    getpw* RPCs; a missing account (ENOENT) yields None. Everything else
    falls through to a generic streamed dscached query.
    """
    # Counting is not implemented for this data source.
    if params and params.get('count'):
        return None
    # Common use cases optimization
    if filter and len(filter) == 1 and params and params.get('single'):
        key, op, value = filter[0]
        if op == '=':
            if value is None:
                return None
            try:
                if key == 'id':
                    return self.dispatcher.call_sync('dscached.account.getpwuuid', value)
                if key == 'uid':
                    return self.dispatcher.call_sync('dscached.account.getpwuid', value)
                if key == 'username':
                    return self.dispatcher.call_sync('dscached.account.getpwnam', value)
            except RpcException as err:
                # ENOENT means "no such account" — report as no result.
                if err.code == errno.ENOENT:
                    return None
                raise
    # NOTE(review): filter/params are forwarded to dscached AND re-applied
    # locally by q.query — confirm double application is intended.
    return q.query(
        self.dispatcher.call_sync('dscached.account.query', filter, params),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query kerberos keytabs, replacing each raw keytab blob with a decoded
    list of entries (vno, principal, enctype).
    """
    ctx = krb5.Context()

    def extend(keytab):
        keytab['entries'] = []
        try:
            kt = krb5.Keytab(ctx, contents=keytab['keytab'])
            # Entries are appended incrementally, so a mid-parse failure
            # keeps whatever entries decoded successfully so far.
            for i in kt.entries:
                keytab['entries'].append({
                    'vno': i.vno,
                    'principal': i.principal,
                    'enctype': i.enctype
                })
        except krb5.KrbException:
            pass
        # Never expose the raw binary keytab to callers.
        del keytab['keytab']
        return keytab

    return q.query(
        self.datastore.query_stream('kerberos.keytabs', callback=extend),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query IPMI LAN configuration for every available channel.

    Shells out to ipmitool per channel, parses the key/value output via
    RE_ATTRS and maps known keys through IPMI_ATTR_MAP.

    :raises RpcException: ENXIO when the IPMI device is absent; EFAULT when
        ipmitool fails for a channel.
    """
    if not self.is_ipmi_loaded():
        raise RpcException(errno.ENXIO, 'The IPMI device could not be found')
    result = []
    for channel in self.channels():
        try:
            out, err = system('/usr/local/bin/ipmitool', 'lan', 'print', str(channel))
        except SubprocessException as e:
            raise RpcException(
                errno.EFAULT, 'Cannot receive IPMI configuration: {0}'.format(
                    e.err.strip()))
        # Parse "Key : Value" pairs, keeping only the attributes we map.
        raw = {k.strip(): v.strip() for k, v in RE_ATTRS.findall(out)}
        ret = {
            IPMI_ATTR_MAP[k]: v
            for k, v in list(raw.items())
            if k in IPMI_ATTR_MAP
        }
        ret['id'] = channel
        # ipmitool reports 'Disabled' for no VLAN; normalize to None.
        ret['vlan_id'] = None if ret.get(
            'vlan_id') == 'Disabled' else ret.get('vlan_id')
        ret['dhcp'] = True if ret.get('dhcp') == 'DHCP Address' else False
        result.append(ret)
    return q.query(result, *(filter or []), stream=True, **(params or {}))
def run(self):
    """
    Migrate the FreeNAS 9 AFP service configuration into the FreeNAS 10
    service database via the 'service.update' subtask.

    Reads the legacy sqlite tables (services_services for the enable flag,
    services_afp for AFP settings) and maps each column onto the new config
    schema. A failed update is reported as a task warning, not an error.
    """
    # dict containing service enable flags for all services
    fn9_services = {
        srv['srv_service']: srv
        for srv in get_table('select * from services_services').values()
    }
    fn10_services = list(self.dispatcher.call_sync('service.query'))
    # Migrating AFP service
    fn9_afp = get_table('select * from services_afp', dictionary=False)[0]
    try:
        self.run_subtask_sync(
            'service.update',
            q.query(fn10_services, ("name", "=", "afp"), single=True)['id'],
            {'config': {
                'enable': bool(fn9_services['afp']['srv_enable']),
                'guest_enable': bool(fn9_afp['afp_srv_guest']),
                'guest_user': fn9_afp['afp_srv_guest_user'],
                # Comma-separated bind addresses; empty/whitespace items are
                # dropped, and an empty list collapses to None.
                'bind_addresses': [
                    i.strip() for i in fn9_afp['afp_srv_bindip'].split(',')
                    if (i and not i.isspace())
                ] or None,
                'connections_limit': fn9_afp['afp_srv_connections_limit'],
                # Legacy column stores 'True'/1 rather than a clean boolean.
                'homedir_enable': True if fn9_afp['afp_srv_homedir_enable'] in ('True', 1) else False,
                'homedir_path': fn9_afp['afp_srv_homedir'] or None,
                'homedir_name': fn9_afp['afp_srv_homename'] or None,
                'dbpath': fn9_afp['afp_srv_dbpath'] or None,
                'auxiliary': fn9_afp['afp_srv_global_aux'] or None
            }}
        )
    except RpcException as err:
        self.add_warning(TaskWarning(
            errno.EINVAL,
            'Could not update AFP service settings due to err: {0}'.format(err)
        ))
def query(self, filter=None, params=None):
    """
    Query certificates, annotating each with the on-disk paths of its
    certificate, private key and CSR files (when present).
    """
    def add_paths(cert):
        # CA-type certificates live in a dedicated subdirectory.
        base = '/etc/certificates/CA' if cert['type'].startswith('CA_') else '/etc/certificates'
        if cert.get('certificate'):
            cert['certificate_path'] = os.path.join(base, '{0}.crt'.format(cert['name']))
        if cert.get('privatekey'):
            cert['privatekey_path'] = os.path.join(base, '{0}.key'.format(cert['name']))
        if cert.get('csr'):
            cert['csr_path'] = os.path.join(base, '{0}.csr'.format(cert['name']))
        return cert

    return q.query(
        self.datastore.query_stream('crypto.certificates', callback=add_paths),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query system statistics (load, processes, memory, df), excluding zfs,
    and derive a compact underscore-style short_name for each stat.
    """
    def extend(stat):
        # Names look like '<host>.<plugin>[-<instance>].<type>[-<value>]';
        # split on the first three dots. TODO confirm exact naming scheme.
        split_name = stat['name'].split('.', 3)
        if 'df' in stat['name']:
            # df: combine the mount instance and the value kind.
            stat['short_name'] = dash_to_underscore(
                split_name[1].split('-', 1)[1] + '-' + split_name[2].split('-', 1)[1]
            )
        elif 'load' in stat['name']:
            stat['short_name'] = dash_to_underscore(split_name[1] + '-' + split_name[3])
        else:
            stat['short_name'] = dash_to_underscore(split_name[2])
        normalize_values(stat)
        return stat

    raw_stats = self.dispatcher.call_sync(
        'stat.query',
        [
            ['or', [('name', '~', 'load'), ('name', '~', 'processes'), ('name', '~', 'memory'), ('name', '~', 'df')]],
            ['nor', [('name', '~', 'zfs')]]
        ]
    )
    # Lazily decorate each stat as it streams through.
    stats = map(extend, raw_stats)
    return q.query(stats, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query shares. The filesystem path is resolved eagerly; permissions and
    permissions_type are wrapped in lazy proxies so the underlying RPCs run
    only if a caller actually reads those fields.
    """
    def extend(share):
        path = None
        try:
            path = self.translate_path(share['id'])
        except RpcException:
            # Path cannot be resolved (e.g. volume offline); leave as None.
            pass

        def get_perms():
            # Only filesystem-backed targets can be stat()ed. Closes over
            # 'path' resolved above; returns None for other target types.
            if share['target_type'] in ('DIRECTORY', 'DATASET', 'FILE'):
                perms = self.dispatcher.call_sync('filesystem.stat', path)
                return perms['permissions']

        def get_perm_type():
            # Permission type only applies to dataset-backed shares.
            if share['target_type'] == 'DATASET':
                return self.dispatcher.call_sync(
                    'volume.dataset.query',
                    [('id', '=', share['target_path'])],
                    {'select': 'permissions_type', 'single': True}
                )

        share['filesystem_path'] = path
        share['permissions_type'] = lazy(get_perm_type)
        share['permissions'] = lazy(get_perms)
        return share

    return q.query(
        self.datastore.query_stream('shares', callback=extend),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """Return peers of type 'freenas', with filter/params applied locally."""
    freenas_peers = self.dispatcher.call_sync('peer.query', [('type', '=', 'freenas')])
    return q.query(freenas_peers, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Return peers of type 'vmware', with filter/params applied locally."""
    vmware_peers = self.dispatcher.call_sync('peer.query', [('type', '=', 'vmware')])
    return q.query(vmware_peers, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query certificates; attach on-disk file paths and wrap the private key
    material in a Password so it is not exposed in plain form.
    """
    def decorate(cert):
        # CA-type certificates are stored in a dedicated subdirectory.
        base = '/etc/certificates/CA' if cert['type'].startswith('CA_') else '/etc/certificates'
        if cert.get('certificate'):
            cert['certificate_path'] = os.path.join(base, '{0}.crt'.format(cert['name']))
        if cert.get('privatekey'):
            cert['privatekey'] = Password(cert['privatekey'])
            cert['privatekey_path'] = os.path.join(base, '{0}.key'.format(cert['name']))
        if cert.get('csr'):
            cert['csr_path'] = os.path.join(base, '{0}.csr'.format(cert['name']))
        return cert

    return q.query(
        self.datastore.query_stream('crypto.certificates', callback=decorate),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query service definitions, decorating each entry with lazily computed
    state, error, pid and config fields.

    Returns a streamed q.query over the 'service_definitions' datastore
    collection with the optional filter/params applied.
    """
    def extend(i):
        # One shared lazy status probe; state/error/pid each pick a slot.
        lazy_status = lazy(get_status, self.dispatcher, self.datastore, i)
        entry = {
            'id': i['id'],
            'name': i['name'],
            'dependencies': i.get('dependencies', []),
            'labels': None,
            'state': lazy(lambda: lazy_status()[0]),
            'error': lazy(lambda: lazy_status()[1]),
        }
        if 'launchd' in i:
            launchd = i['launchd']
            # launchd may be a single job dict or a list of them.
            jobs = [launchd] if isinstance(launchd, dict) else launchd
            entry['labels'] = [j['Label'] for j in jobs]
        # BUG FIX: the original had a trailing comma here, which made 'pid'
        # a one-element tuple wrapping the lazy proxy instead of the proxy.
        entry['pid'] = lazy(lambda: lazy_status()[2])
        entry['builtin'] = i['builtin']
        entry['config'] = lazy(self.get_service_config, i['id'])
        return entry

    return q.query(
        self.datastore.query_stream('service_definitions', callback=extend),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """Query scheduled jobs via the scheduler management service."""
    jobs = self.dispatcher.call_sync('scheduler.management.query')
    return q.query(jobs, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Merge dscached-discovered kerberos realms with locally stored ones."""
    realms = list(self.dispatcher.call_sync('dscached.management.get_realms'))
    realms += self.datastore.query('kerberos.realms')
    return q.query(realms, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Enumerate discovered neighborhood services of every known type."""
    def discovered():
        for regtype in KNOWN_SERVICES:
            for entry in self.dispatcher.call_sync('neighbord.discovery.find', regtype):
                # Tag each result with the registration type it came from.
                entry['service'] = regtype
                yield entry

    return q.query(discovered(), *(filter or []), **(params or {}))
def query(self, filter=None, params=None):
    """
    Query backup records from the datastore.

    The original passed an identity callback that returned each record
    unchanged; streaming the collection directly is equivalent and simpler.
    """
    return q.query(
        self.datastore.query_stream('backup'),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query syslog entries: persisted records from the datastore (when one is
    configured) chained with the in-memory buffer.

    NOTE(review): filter/params are applied both at the datastore level and
    again by the outer q.query; paging params applied twice may interact —
    confirm intended.
    """
    ds_results = self.context.datastore.query_stream('syslog', *(filter or []), **(params or {})) \
        if self.context.datastore \
        else []
    return q.query(itertools.chain(ds_results, self.context.store), *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Yield every discovered network service, tagged with its registration type."""
    def found_services():
        for svc_type in KNOWN_SERVICES:
            results = self.dispatcher.call_sync('neighbord.discovery.find', svc_type)
            for result in results:
                result['service'] = svc_type
                yield result

    return q.query(found_services(), *(filter or []), **(params or {}))
def query(self, filter=None, params=None):
    """
    Query alert filters, annotating each with its index in the configured
    ordering. Only filters present in the order list are returned, so the
    index lookup cannot fail.
    """
    order = self.configstore.get('alert.filter.order')

    def annotate(entry):
        entry['index'] = order.index(entry['id'])
        return entry

    ordered_filters = self.datastore.query('alert.filters', ('id', 'in', order), callback=annotate)
    return q.query(ordered_filters, *(filter or []), **(params or {}))
def query(self, filter=None, params=None):
    """Query directory services, best-effort attaching live dscached status."""
    def with_status(entry):
        try:
            entry["status"] = self.dispatcher.call_sync("dscached.management.get_status", entry["id"])
        except RpcException:
            # Status stays absent when dscached cannot report it.
            pass
        return entry

    return q.query(
        self.datastore.query_stream("directories", callback=with_status),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query backup records from the datastore.

    The original passed an identity callback that returned each record
    unchanged; streaming the collection directly is equivalent and simpler.
    """
    return q.query(
        self.datastore.query_stream('backup'),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query syslog entries: persisted records from the datastore (when one is
    configured) chained with the in-memory buffer.

    NOTE(review): filter/params are applied both at the datastore level and
    again by the outer q.query; paging params applied twice may interact —
    confirm intended.
    """
    ds_results = self.context.datastore.query_stream('syslog', *(filter or []), **(params or {})) \
        if self.context.datastore \
        else []
    return q.query(
        itertools.chain(ds_results, self.context.store),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """Query amazon-s3 peers, attaching a lazily evaluated status field."""
    def peers_with_status():
        for peer in self.datastore.query_stream('peers', ('type', '=', 'amazon-s3')):
            # Status is resolved only if the caller reads it.
            peer['status'] = lazy(self.get_status, peer['id'])
            yield peer

    return q.query(peers_with_status(), *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Query peers; attach cached connectivity status, UNKNOWN when absent."""
    def attach_status(peer):
        peer['status'] = peers_status.get(peer['id'], {'state': 'UNKNOWN', 'rtt': None})
        return peer

    return q.query(
        self.datastore.query_stream('peers', callback=attach_status),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """Enumerate alert-emitter plugins together with their live configuration."""
    def emitters():
        # Snapshot the plugin list so registration changes mid-iteration
        # cannot disturb us.
        for plugin in list(self.dispatcher.plugins.values()):
            meta = plugin.metadata
            if not meta or meta.get('type') != 'alert_emitter':
                continue
            yield {
                'id': meta['id'],
                'name': meta['name'],
                'config': self.dispatcher.call_sync('alert.emitter.{0}.get_config'.format(meta['name']))
            }

    return q.query(emitters(), *(filter or []), **(params or {}))
def query(self, filter=None, params=None):
    """Query freenas peers, attaching a lazily evaluated status field."""
    def rows():
        for peer in self.datastore.query_stream('peers', ('type', '=', 'freenas')):
            # Defer the status RPC until the field is actually read.
            peer['status'] = lazy(self.get_status, peer['id'])
            yield peer

    return q.query(rows(), *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Query peers; each gets its cached status or an UNKNOWN placeholder."""
    def decorate(entry):
        entry['status'] = peers_status.get(entry['id'], {
            'state': 'UNKNOWN',
            'rtt': None
        })
        return entry

    stream = self.datastore.query_stream('peers', callback=decorate)
    return q.query(stream, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Query directories with a lazily resolved dscached status field."""
    def attach_status(entry):
        # The status RPC fires only when the field is read.
        entry['status'] = lazy(self.dispatcher.call_sync, 'dscached.management.get_status', entry['id'])
        return entry

    return q.query(
        self.datastore.query_stream('directories', callback=attach_status),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query directory services, best-effort attaching live dscached status.

    Consistency fix: uses datastore.query_stream (as the sibling directory
    query does) instead of datastore.query, matching the stream=True passed
    to q.query below.
    """
    def extend(directory):
        with contextlib.suppress(RpcException):
            directory['status'] = self.dispatcher.call_sync('dscached.management.get_status', directory['id'])
        return directory

    return q.query(
        self.datastore.query_stream('directories', callback=extend),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """Aggregate peers of every supported type, attaching cached status."""
    def all_peers():
        for ptype in self.peer_types():
            # Exclude the remote 'status' field; we substitute the local cache.
            for entry in self.dispatcher.call_sync(f'peer.{ptype}.query', [], {'exclude': 'status'}):
                entry['status'] = peers_status.get(entry['id'], {'state': 'UNKNOWN', 'rtt': None})
                yield entry

    return q.query(
        all_peers(),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query network-interface statistics, deriving a compact underscore-style
    short_name from the dotted stat name.
    """
    def extend(stat):
        # Names split on the first three dots:
        # [host, plugin-instance, type, value] — TODO confirm exact scheme.
        split_name = stat['name'].split('.', 3)
        stat['short_name'] = dash_to_underscore(
            split_name[1] + '-' + split_name[3] + '-' + split_name[2].split('_', 2)[1]
        )
        normalize_values(stat)
        return stat

    raw_stats = self.dispatcher.call_sync('stat.query', [('name', '~', 'interface')])
    # Decorate lazily as stats stream through.
    stats = map(extend, raw_stats)
    return q.query(stats, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query directory services; wrap any stored password parameter in a
    Password and attach a lazily resolved dscached status.
    """
    def protect(entry):
        entry_params = entry['parameters']
        if 'password' in entry_params:
            entry_params['password'] = Password(entry_params['password'])
        entry['status'] = lazy(self.dispatcher.call_sync, 'dscached.management.get_status', entry['id'])
        return entry

    return q.query(
        self.datastore.query_stream('directories', callback=protect),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """Collect peers from every peer-type service, with cached status merged in."""
    def merged():
        for t in self.peer_types():
            remote = self.dispatcher.call_sync(f'peer.{t}.query', [], {'exclude': 'status'})
            for peer in remote:
                peer['status'] = peers_status.get(peer['id'], {
                    'state': 'UNKNOWN',
                    'rtt': None
                })
                yield peer

    return q.query(merged(), *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query network-interface statistics, deriving a compact underscore-style
    short_name from the dotted stat name.
    """
    def extend(stat):
        # Names split on the first three dots:
        # [host, plugin-instance, type, value] — TODO confirm exact scheme.
        split_name = stat['name'].split('.', 3)
        stat['short_name'] = dash_to_underscore(
            split_name[1] + '-' + split_name[3] + '-' + split_name[2].split('_', 2)[1])
        normalize_values(stat)
        return stat

    raw_stats = self.dispatcher.call_sync('stat.query', [('name', '~', 'interface')])
    # Decorate lazily as stats stream through.
    stats = map(extend, raw_stats)
    return q.query(stats, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """List installed packages read directly from the pkg(8) sqlite database."""
    def packages():
        conn = sqlite3.connect(PKGDB_FILE, isolation_level=None)
        try:
            # Columns of interest: 0=id, 2=name, 3=version.
            for row in conn.execute('SELECT * FROM packages'):
                yield {
                    'id': row[0],
                    'name': row[2],
                    'version': row[3]
                }
        finally:
            conn.close()

    return q.query(packages(), *(filter or []), **(params or {}))
def query(self, filter=None, params=None):
    """
    Query CPU statistics; aggregated stats get an 'aggregated-<type>' short
    name, per-core stats get 'cpu-<n>-<type>'.
    """
    def extend(stat):
        # Third dotted component carries the stat type.
        type = stat['name'].split('.', 3)[2]
        if 'aggregation' in stat['name']:
            stat['short_name'] = dash_to_underscore('aggregated-' + type)
        else:
            # Core number is the first run of digits in the name.
            stat['short_name'] = dash_to_underscore('cpu-' + re.search(r'\d+', stat['name']).group() + '-' + type)
        normalize_values(stat)
        return stat

    raw_stats = self.dispatcher.call_sync('stat.query', [('name', '~', 'cpu')])
    stats = map(extend, raw_stats)
    return q.query(stats, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query disk statistics, deriving an underscore-style short_name from the
    dotted stat name (plugin, value, and optional type suffix).
    """
    def extend(stat):
        split_name = stat['name'].split('.', 3)
        short_name = f'{split_name[1]}_{split_name[3]}'
        # Append the trailing type qualifier when the type has one.
        if '_' in split_name[2]:
            short_name += '_{}'.format(split_name[2].split('_')[-1])
        stat['short_name'] = dash_to_underscore(short_name)
        normalize_values(stat)
        return stat

    raw_stats = self.dispatcher.call_sync('stat.query', [('name', '~', 'disk')])
    stats = map(extend, raw_stats)
    return q.query(stats, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query VM datastores: discovered ones from every supported driver plus
    the persisted 'vm.datastores' collection, each annotated with its
    driver capabilities.
    """
    drivers = self.supported_drivers()

    def extend(obj):
        obj['capabilities'] = drivers[obj['type']]
        return obj

    def doit():
        for i in drivers:
            # BUG FIX: was suppress(BaseException). Suppressing BaseException
            # around a yield also swallows GeneratorExit, which makes closing
            # this generator raise RuntimeError; Exception is the safe scope
            # for "ignore a failing driver".
            with contextlib.suppress(Exception):
                for d in self.dispatcher.call_sync('vm.datastore.{0}.discover'.format(i)):
                    yield extend(d)
        yield from self.datastore.query_stream('vm.datastores', callback=extend)

    return q.query(doit(), *(filter or []), **(params or {}))
def query(self, filter=None, params=None):
    """Query vmware peers; mask stored credentials and defer status lookup."""
    def rows():
        for peer in self.datastore.query_stream('peers', ('type', '=', 'vmware')):
            secret = q.get(peer, 'credentials.password')
            if secret:
                # Wrap the raw secret so it is not exposed in plain form.
                q.set(peer, 'credentials.password', Password(secret))
            peer['status'] = lazy(self.get_status, peer['id'])
            yield peer

    return q.query(rows(), *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query configured network interfaces, merging in live status from
    networkd. Interfaces with no live status (removed or disconnected) are
    dropped from the results.
    """
    live = self.dispatcher.call_sync('networkd.configuration.query_interfaces')

    def merge(entry):
        if entry['id'] not in live:
            # Removed or disconnected — exclude from output.
            return None
        entry['status'] = live[entry['id']]
        return entry

    return q.query(
        self.datastore.query_stream('network.interfaces', callback=merge),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def query(self, filter=None, params=None):
    """
    Query configured network interfaces with live networkd status merged in;
    interfaces unknown to networkd are filtered out of the stream.
    """
    current = self.dispatcher.call_sync(
        'networkd.configuration.query_interfaces')

    def attach(iface):
        if iface['id'] not in current:
            # The given interface is either removed or disconnected
            return None
        iface['status'] = current[iface['id']]
        return iface

    stream = self.datastore.query_stream('network.interfaces', callback=attach)
    return q.query(stream, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """Query vmware peers with masked passwords and lazily resolved status."""
    def vmware_peers():
        for entry in self.datastore.query_stream('peers', ('type', '=', 'vmware')):
            stored = q.get(entry, 'credentials.password')
            if stored:
                q.set(entry, 'credentials.password', Password(stored))
            entry['status'] = lazy(self.get_status, entry['id'])
            yield entry

    return q.query(
        vmware_peers(),
        *(filter or []),
        stream=True,
        **(params or {})
    )
def lazy_query(self, filter=None, params=None):
    """Demo query: ten rows mixing an eager field with lazily computed ones."""
    def annotate(row):
        ident = row['id']

        def slow_scalar():
            time.sleep(1)
            return 'I am so slow: {0}'.format(ident)

        def slow_mapping():
            time.sleep(1)
            return {'foo': ident + 1, 'bar': ident + 2}

        row['fast_value'] = ident * 5
        # Evaluated only when the caller reads the field.
        row['slow_value'] = lazy(slow_scalar)
        row['composite_slow_value'] = lazy(slow_mapping)
        return row

    source = ({'id': n} for n in range(0, 10))
    return q.query(source, *(filter or []), callback=annotate, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query scheduled jobs; a schedule whose every field (timezone excepted)
    is the '*' wildcard is normalized to None.
    """
    def normalize(job):
        sched = job.get('schedule', {})
        if all(v == '*' for k, v in sched.items() if k != 'timezone'):
            job['schedule'] = None
        return job

    return q.query(
        self.dispatcher.call_sync('scheduler.management.query'),
        *(filter or []),
        stream=True,
        **(params or {}),
        callback=normalize
    )
def attr_query(self):
    """
    Exercise q.set/get/contains/delete/query across both attribute-backed
    objects and nested dicts; returns True when all operations behave.
    """
    class Holder(object):
        pass

    obj = Holder()
    data = {}
    q.set(obj, 'f', True)
    q.set(data, 'f2', Holder())
    q.set(data, 'f2.nested', True)
    if q.get(obj, 'f') and q.get(data, 'f2.nested') and isinstance(q.get(data, 'f2'), Holder):
        items = [data, obj]
        if q.contains(obj, 'f'):
            q.delete(obj, 'f')
        return bool(q.query(items, ('f2.nested', '=', True), count=True))
    return False
def query(self, filter=None, params=None):
    """
    Query CPU statistics; aggregated stats get an 'aggregated-<type>' short
    name, per-core stats get 'cpu-<n>-<type>'.
    """
    def extend(stat):
        # Third dotted component carries the stat type.
        type = stat['name'].split('.', 3)[2]
        if 'aggregation' in stat['name']:
            stat['short_name'] = dash_to_underscore('aggregated-' + type)
        else:
            # Core number is the first run of digits in the name.
            stat['short_name'] = dash_to_underscore(
                'cpu-' + re.search(r'\d+', stat['name']).group() + '-' + type)
        normalize_values(stat)
        return stat

    raw_stats = self.dispatcher.call_sync('stat.query', [('name', '~', 'cpu')])
    stats = map(extend, raw_stats)
    return q.query(stats, *(filter or []), stream=True, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query IPMI LAN configuration for every available channel.

    Shells out to ipmitool per channel, parses the key/value output via
    RE_ATTRS and maps known keys through IPMI_ATTR_MAP.

    :raises RpcException: ENXIO when the IPMI device is absent; EFAULT when
        ipmitool fails for a channel.
    """
    if not self.is_ipmi_loaded():
        raise RpcException(errno.ENXIO, 'The IPMI device could not be found')
    result = []
    for channel in self.channels():
        try:
            out, err = system('/usr/local/bin/ipmitool', 'lan', 'print', str(channel))
        except SubprocessException as e:
            raise RpcException(errno.EFAULT, 'Cannot receive IPMI configuration: {0}'.format(e.err.strip()))
        # Parse "Key : Value" pairs, keeping only the attributes we map.
        raw = {k.strip(): v.strip() for k, v in RE_ATTRS.findall(out)}
        ret = {IPMI_ATTR_MAP[k]: v for k, v in list(raw.items()) if k in IPMI_ATTR_MAP}
        ret['id'] = channel
        # ipmitool reports 'Disabled' for no VLAN; normalize to None.
        ret['vlan_id'] = None if ret.get('vlan_id') == 'Disabled' else ret.get('vlan_id')
        ret['dhcp'] = True if ret.get('dhcp') == 'DHCP Address' else False
        result.append(ret)
    return q.query(result, *(filter or []), stream=True, **(params or {}))
def lazy_query(self, filter=None, params=None):
    """Demo query producing rows whose slow fields are computed on demand."""
    def decorate(record):
        rid = record['id']

        def compute_scalar():
            time.sleep(1)
            return 'I am so slow: {0}'.format(rid)

        def compute_mapping():
            time.sleep(1)
            return {
                'foo': rid + 1,
                'bar': rid + 2
            }

        record['fast_value'] = rid * 5
        record['slow_value'] = lazy(compute_scalar)
        record['composite_slow_value'] = lazy(compute_mapping)
        return record

    rows = ({'id': k} for k in range(0, 10))
    return q.query(rows, *(filter or []), callback=decorate, **(params or {}))
def query(self, filter=None, params=None):
    """
    Query shares, eagerly resolving the filesystem path and (for
    filesystem-backed targets) the permissions; RPC failures leave both
    unresolved.
    """
    def annotate(share):
        fs_path = None
        stat_info = None
        try:
            fs_path = self.translate_path(share['id'])
            if share['target_type'] in ('DIRECTORY', 'DATASET', 'FILE'):
                stat_info = self.dispatcher.call_sync('filesystem.stat', fs_path)
        except RpcException:
            # Leave path/permissions unresolved on RPC failure.
            pass
        share['filesystem_path'] = fs_path
        share['permissions'] = stat_info['permissions'] if stat_info else None
        return share

    return q.query(
        self.datastore.query_stream('shares', callback=annotate),
        *(filter or []),
        stream=True,
        **(params or {})
    )