def find_share_namespace(context, task):
    """Resolve the CLI namespace class for a share create/update task, or None.

    Fix: for 'share.update' the query returned the whole share entity, which
    was then compared against type strings and could never match; extract the
    entity's 'type' field before dispatching.
    """
    if task['name'] == 'share.create':
        share_type = get(task, 'args.0.type')
    elif task['name'] == 'share.update':
        share_id = get(task, 'args.0')
        share = context.entity_subscribers['share'].query(('id', '=', share_id), single=True)
        # BUG FIX: previously the entire entity (or None) was compared below.
        share_type = get(share, 'type') if share else None
    else:
        return

    # Equality-based dispatch keeps the original "unknown type -> None" behavior.
    for kind, namespace in (
        ('smb', SMBSharesNamespace),
        ('nfs', NFSSharesNamespace),
        ('afp', AFPSharesNamespace),
        ('webdav', WebDAVSharesNamespace),
        ('iscsi', ISCSISharesNamespace),
    ):
        if share_type == kind:
            return namespace
def select_fn(fn, obj):
    """Apply *fn* to *obj* (when given) and project it through ``select``.

    NOTE(review): ``select`` is a free variable resolved from an enclosing
    scope — confirm this function is defined where ``select`` is bound.
    """
    if fn:
        obj = fn(obj)
    if isinstance(select, (list, tuple)):
        return [get(obj, field) for field in select]
    if isinstance(select, str):
        return get(obj, select)
def on_update(self, old_entity, new_entity):
    """Refresh any shell path entries that point at the entity being updated."""
    pk = self.primary_key_name
    for cwd in self.context.ml.path:
        if not (isinstance(cwd, SingleItemNamespace) and cwd.parent == self):
            continue
        if q.get(old_entity, pk) != q.get(cwd.entity, pk):
            continue
        # Re-point the cached entity at the (possibly renamed) primary key
        # and reload; if the entity vanished, drop back up one level.
        q.set(cwd.entity, pk, q.get(new_entity, pk))
        cwd.load()
        if not cwd.entity:
            self.context.ml.cd_up()
def convert_group(self, entry):
    """Convert a raw LDAP group entry into the normalized group dict."""
    attrs = dict(entry['attributes'])
    return {
        'id': self.get_id(attrs),
        'gid': int(get(attrs, 'gidNumber')),
        'sid': get(attrs, 'sambaSID'),
        'name': get(attrs, 'cn.0'),
        'builtin': False,
        'sudo': False,
    }
def modes_to_oct(modes):
    """Fold a nested read/write/execute permission mapping into a stat(2) mode bitmask."""
    flag_table = (
        ('user.read', stat.S_IRUSR),
        ('user.write', stat.S_IWUSR),
        ('user.execute', stat.S_IXUSR),
        ('group.read', stat.S_IRGRP),
        ('group.write', stat.S_IWGRP),
        ('group.execute', stat.S_IXGRP),
        ('others.read', stat.S_IROTH),
        ('others.write', stat.S_IWOTH),
        ('others.execute', stat.S_IXOTH),
    )
    result = 0
    for path, flag in flag_table:
        if get(modes, path):
            result |= flag
    return result
def get_id(self, entry):
    """Derive a stable identifier for an LDAP entry.

    Prefers the entry's own UUID; otherwise synthesizes one from the base-DN
    checksum plus uid/gid; falls back to a random UUID.
    """
    checksum = crc32(dn_to_domain(self.parameters['base_dn']))
    if 'entryUUID' in entry:
        return get(entry, 'entryUUID')
    for attr in ('uidNumber', 'gidNumber'):
        if attr in entry:
            return str(uuid2(checksum, int(get(entry, attr))))
    return str(uuid.uuid4())
def save(self, this, new=False):
    """Persist *this* entity into the parent's nested collection and save the parent."""
    collection_path = self.parent_path
    if new:
        # Lazily create the nested list on first insert.
        if not q.contains(self.parent.entity, collection_path):
            q.set(self.parent.entity, collection_path, [])
        q.get(self.parent.entity, collection_path).append(this.entity)
    else:
        pk = self.primary_key_name
        existing = first_or_default(
            lambda item: item[pk] == this.entity[pk],
            q.get(self.parent.entity, collection_path)
        )
        existing.update(this.entity)
    return self.parent.save()
def save(self, this, new=False):
    """Insert or update *this* entity inside the parent entity, then save the parent."""
    container = self.parent.entity
    path = self.parent_path
    if new:
        if not q.contains(container, path):
            q.set(container, path, [])
        q.get(container, path).append(this.entity)
        return self.parent.save()

    key = self.primary_key_name
    match = first_or_default(
        lambda candidate: candidate[key] == this.entity[key],
        q.get(container, path)
    )
    match.update(this.entity)
    return self.parent.save()
def convert_user(self, entry):
    """Convert a raw AD LDAP user entry into the normalized user dict.

    Returns None for non-user entries, computer accounts, or users unknown
    to winbindd.
    """
    if not entry:
        return

    entry = dict(entry['attributes'])
    if 'user' not in get(entry, 'objectClass'):
        # not a user
        return
    if 'computer' in get(entry, 'objectClass'):
        # not a user
        return

    username = get(entry, 'sAMAccountName')
    usersid = get(entry, 'objectSid')
    groups = []

    try:
        wbu = self.wbc.get_user(name='{0}\\{1}'.format(self.realm, username))
    except Exception:
        # FIX: was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed. A failed lookup still means "skip this user".
        return

    if not wbu:
        logging.warning('User {0} found in LDAP, but not in winbindd.'.format(username))
        return

    if get(entry, 'memberOf'):
        builder = LdapQueryBuilder()
        qstr = builder.build_query([('distinguishedName', 'in', get(entry, 'memberOf'))])
        for r in self.search(self.base_dn, qstr):
            r = dict(r['attributes'])
            guid = uuid.UUID(get(r, 'objectGUID'))
            groups.append(str(guid))

    return {
        'id': str(uuid.UUID(get(entry, 'objectGUID'))),
        'sid': str(usersid),
        'uid': wbu.passwd.pw_uid,
        'builtin': False,
        'username': username,
        'aliases': [wbu.passwd.pw_name],
        'full_name': get(entry, 'name'),
        'email': None,
        'locked': False,
        'sudo': False,
        'password_disabled': False,
        'group': str(self.domain_users_guid),
        'groups': groups,
        'shell': wbu.passwd.pw_shell,
        'home': wbu.passwd.pw_dir
    }
def directory_exists(self, datastore_id, datastore_path):
    """Check whether *datastore_path* exists inside a local VM datastore."""
    ds = self.datastore.get_by_id('vm.datastores', datastore_id)
    if ds['type'] != 'local':
        raise RpcException(errno.EINVAL, 'Invalid datastore type')
    full_path = os.path.join(q.get(ds, 'properties.path'), datastore_path)
    return os.path.exists(full_path)
def run(self, id, updated_fields):
    """Update an NFS share, validate its options, and regenerate/reload NFS config."""
    share = self.datastore.get_by_id('shares', id)
    share.update(updated_fields)

    # alldirs exports every subdirectory, which only makes sense for datasets.
    if share['target_type'] != 'DATASET' and q.get(share, 'properties.alldirs'):
        raise TaskException(errno.EINVAL, 'alldirs can be only used with dataset shares')

    if share['properties']['security']:
        nfs_service = self.dispatcher.call_sync(
            'service.query', [('name', '=', 'nfs')], {'single': True}
        )
        if not nfs_service['config']['v4']:
            self.add_warning(TaskWarning(
                errno.ENXIO,
                "NFS security option requires NFSv4 support to be enabled in NFS service settings."
            ))

    self.datastore.update('shares', id, share)
    self.dispatcher.call_sync('etcd.generation.generate_group', 'nfs')
    self.dispatcher.call_sync('service.reload', 'nfs', timeout=60)
    self.dispatcher.dispatch_event('share.nfs.changed', {
        'operation': 'update',
        'ids': [id]
    })
def run(self, id):
    """Delete a FreeNAS peer, attempting remote-side cleanup first."""
    peer = self.datastore.get_by_id('peers', id)
    if not peer:
        raise TaskException(errno.ENOENT, 'Peer entry {0} does not exist'.format(id))

    remote = q.get(peer, 'credentials.address')
    remote_client = None
    hostid = self.dispatcher.call_sync('system.info.host_uuid')

    try:
        try:
            remote_client = get_freenas_peer_client(self, remote)
            call_task_and_check_state(remote_client, 'peer.freenas.delete_local', hostid, False)
        except RpcException as e:
            # Remote cleanup is best-effort; warn and fall through to local delete.
            self.add_warning(TaskWarning(
                e.code,
                'Remote {0} is unreachable. Delete operation is performed at local side only.'.format(remote)
            ))
        except ValueError as e:
            self.add_warning(TaskWarning(errno.EINVAL, str(e)))

        self.join_subtasks(self.run_subtask('peer.freenas.delete_local', id, True))
    finally:
        if remote_client:
            remote_client.disconnect()
def run(self, directory):
    """Create a directory-service entry, enabling SMB first for winbind directories."""
    try:
        params = self.dispatcher.call_sync(
            "dscached.management.normalize_parameters",
            directory["type"],
            directory.get("parameters", {})
        )
    except RpcException as err:
        raise TaskException(err.code, err.message)

    if self.datastore.exists("directories", ("name", "=", directory["name"])):
        raise TaskException(errno.EEXIST, "Directory {0} already exists".format(directory["name"]))

    normalize(directory, {
        "enabled": False,
        "enumerate": True,
        "immutable": False,
        "uid_range": None,
        "gid_range": None
    })

    # Replace passed in params with normalized ones
    directory["parameters"] = params

    if directory["type"] == "winbind":
        normalize(directory, {"uid_range": [100000, 999999], "gid_range": [100000, 999999]})
        smb = self.dispatcher.call_sync("service.query", [("name", "=", "smb")], {"single": True})
        if not q.get(smb, "config.enable"):
            # winbind requires the SMB service; switch it on before configuring.
            q.set(smb, "config.enable", True)
            self.join_subtasks(self.run_subtask("service.update", smb["id"], smb))

    self.id = self.datastore.insert("directories", directory)
    self.dispatcher.call_sync("dscached.management.configure_directory", self.id)
    self.dispatcher.dispatch_event("directory.changed", {"operation": "create", "ids": [self.id]})

    node = ConfigNode("directory", self.configstore)
    node["search_order"] = node["search_order"].value + [directory["name"]]
    self.dispatcher.call_sync("dscached.management.reload_config")
    return self.id
def on_pool_change(args):
    """Keep bootenv 'on_reboot' flags in sync with the boot pool's bootfs property."""
    with dispatcher.get_lock('bootenvs'):
        if args['operation'] != 'update':
            return

        for entity in args['entities']:
            if entity['id'] != boot_pool_name:
                continue

            dispatcher.dispatch_event('boot.pool.changed', {'operation': 'update'})
            current = bootenvs.query(('on_reboot', '=', True), single=True)
            realname = q.get(entity, 'properties.bootfs.value').split('/')[-1]

            # Nothing to do when bootfs already points at the flagged BE.
            if current and realname == current['realname']:
                return

            if current:
                current['on_reboot'] = False
                bootenvs.put(current['id'], current)

            # NOTE(review): assumes a BE with this realname exists in the cache;
            # a missing entry would raise here — confirm upstream guarantees it.
            upcoming = bootenvs.query(('realname', '=', realname), single=True)
            upcoming['on_reboot'] = True
            bootenvs.put(upcoming['id'], upcoming)
def apply(obj, ds):
    """Rewrite DISK devices targeting ZVOL to the BLOCK target type; return *obj*."""
    for device in obj['devices']:
        is_disk = device.get('type') == 'DISK'
        if is_disk and q.get(device, 'properties.target_type') == 'ZVOL':
            q.set(device, 'properties.target_type', 'BLOCK')
    return obj
def run(self, disk):
    """Format *disk*, attach it to the boot pool's data group, and install the bootloader."""
    pool = self.dispatcher.call_sync('zfs.pool.get_boot_pool')
    target_guid = q.get(pool, 'groups.data.0.guid')

    disk_id = self.dispatcher.call_sync('disk.path_to_id', disk)
    self.run_subtask_sync('disk.format.boot', disk_id)
    self.set_progress(20)

    boot_pool_name = self.configstore.get('system.boot_pool_name')
    vdev_spec = {
        'target_guid': target_guid,
        'vdev': {
            'type': 'disk',
            'path': os.path.join('/dev', disk + 'p2')
        }
    }
    self.run_subtask_sync(
        'zfs.pool.extend', boot_pool_name, None, [vdev_spec],
        progress_callback=lambda p, m, e: self.chunk_progress(20, 80, '', p, m, e)
    )
    self.set_progress(80)

    # NOTE(review): the disk id is re-resolved after formatting — presumably
    # formatting can change the disk's identity; confirm.
    disk_id = self.dispatcher.call_sync('disk.path_to_id', disk)
    self.run_subtask_sync('disk.install_bootloader', disk_id)
    self.set_progress(100)
def run(self, disk):
    """Add *disk* to the boot pool: format, attach to the data vdev, install grub."""
    pool = self.dispatcher.call_sync('zfs.pool.get_boot_pool')
    attach_guid = q.get(pool, 'groups.data.0.guid')

    # Step 1: format the disk with a boot layout.
    self.run_subtask_sync('disk.format.boot', self.dispatcher.call_sync('disk.path_to_id', disk))
    self.set_progress(20)

    # Step 2: extend the boot pool with the freshly formatted partition.
    pool_name = self.configstore.get('system.boot_pool_name')
    self.run_subtask_sync(
        'zfs.pool.extend',
        pool_name,
        None,
        [{
            'target_guid': attach_guid,
            'vdev': {'type': 'disk', 'path': os.path.join('/dev', disk + 'p2')}
        }],
        progress_callback=lambda p, m, e: self.chunk_progress(20, 80, '', p, m, e)
    )
    self.set_progress(80)

    # Step 3: install the bootloader on the new member disk.
    new_disk_id = self.dispatcher.call_sync('disk.path_to_id', disk)
    self.run_subtask_sync('disk.install_bootloader', new_disk_id)
    self.set_progress(100)
def on_pool_change(args):
    """Track boot-pool bootfs changes and move the on_reboot flag accordingly."""
    with dispatcher.get_lock('bootenvs'):
        if args['operation'] != 'update':
            return

        for pool_entity in args['entities']:
            if pool_entity['id'] != boot_pool_name:
                continue

            dispatcher.dispatch_event('boot.pool.changed', {
                'operation': 'update'
            })

            flagged = bootenvs.query(('on_reboot', '=', True), single=True)
            bootfs_name = q.get(pool_entity, 'properties.bootfs.value').split('/')[-1]
            if flagged and bootfs_name == flagged['realname']:
                # Flag already points at the right boot environment.
                return

            if flagged:
                flagged['on_reboot'] = False
                bootenvs.put(flagged['id'], flagged)

            target = bootenvs.query(('realname', '=', bootfs_name), single=True)
            target['on_reboot'] = True
            bootenvs.put(target['id'], target)
def describe_task_state(task):
    """Render a one-line human-readable summary of a task's state."""
    state = task['state']
    if state == 'EXECUTING':
        if 'progress' not in task:
            return state
        percentage = get(task, 'progress.percentage')
        if percentage is None:
            percentage = 0
        return '{0:2.0f}% ({1})'.format(percentage, get(task, 'progress.message'))
    if state == 'FAILED':
        return 'Failed: {0}'.format(task['error']['message'])
    return state
def do_get(self, obj):
    """Resolve this property's value from *obj*, honoring create-only and condition gates."""
    # Note operator precedence: create_arg OR (condition AND NOT condition(obj)).
    if self.create_arg or (self.condition and not self.condition(obj)):
        return None
    if callable(self.get):
        # FIX: collections.Callable was removed in Python 3.10;
        # the callable() builtin is the portable equivalent.
        return self.get(obj)
    return q.get(obj, self.get)
def get_related(context, name, obj, field):
    """Look up the display name of the entity referenced by *field* on *obj*."""
    related_id = get(obj, field)
    related = context.entity_subscribers[name].query(('id', '=', related_id), single=True)
    return related['name'] if related else None
def convert_user(self, entry):
    """Convert a raw AD LDAP user entry to the normalized user dict (mapper-based uid)."""
    if not entry:
        return

    dn = entry['dn']
    attrs = dict(entry['attributes'])
    object_classes = get(attrs, 'objectClass')
    if 'user' not in object_classes:
        # not a user
        return
    if 'computer' in object_classes:
        # not a user
        return

    username = get(attrs, 'sAMAccountName')
    usersid = get(attrs, 'objectSid')
    uid = self.mapper.get_uid(attrs)
    if uid is None:
        return

    groups = []
    if get(attrs, 'memberOf'):
        builder = LdapQueryBuilder()
        qstr = builder.build_query([
            ('member', '=', dn),
            ('objectClass', '=', 'group')
        ])
        for result in self.search(self.base_dn, qstr, attributes=['objectGUID', 'objectSid']):
            result = dict(result['attributes'])
            groups.append(str(uuid.UUID(get(result, 'objectGUID'))))
            # Members of Domain Admins also receive the local wheel group.
            if result['objectSid'] == self.domain_admins_sid and self.wheel_group:
                groups.append(self.wheel_group['id'])

    return {
        'id': str(uuid.UUID(get(attrs, 'objectGUID'))),
        'sid': str(usersid),
        'uid': uid,
        'builtin': False,
        'username': username,
        'aliases': [f'{self.workgroup}\\{username}'],
        'full_name': get(attrs, 'name'),
        'email': None,
        'locked': False,
        'sudo': False,
        'password_disabled': False,
        'group': str(self.domain_users_guid),
        'groups': groups,
        'shell': '/bin/sh',
        'home': self.context.get_home_directory(self.directory, username)
    }
def convert_user(self, entry):
    """Convert a raw AD LDAP user entry to the normalized user dict via winbindd.

    Returns None for non-user/computer entries and users winbindd cannot resolve.
    """
    if not entry:
        return

    dn = entry['dn']
    entry = dict(entry['attributes'])
    if 'user' not in get(entry, 'objectClass'):
        # not a user
        return
    if 'computer' in get(entry, 'objectClass'):
        # not a user
        return

    username = get(entry, 'sAMAccountName')
    usersid = get(entry, 'objectSid')
    groups = []

    try:
        wbu = self.wbc.get_user(name='{0}\\{1}'.format(self.realm, username))
    except Exception:
        # FIX: was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # propagate. A failed winbind lookup still means "skip this user".
        return

    if not wbu:
        logging.warning('User {0} found in LDAP, but not in winbindd.'.format(username))
        return

    if get(entry, 'memberOf'):
        builder = LdapQueryBuilder()
        qstr = builder.build_query([
            ('member', '=', dn),
            ('objectClass', '=', 'group')
        ])

        for r in self.search(self.base_dn, qstr, attributes=['objectGUID']):
            r = dict(r['attributes'])
            guid = uuid.UUID(get(r, 'objectGUID'))
            groups.append(str(guid))

    return {
        'id': str(uuid.UUID(get(entry, 'objectGUID'))),
        'sid': str(usersid),
        'uid': wbu.passwd.pw_uid,
        'builtin': False,
        'username': username,
        'aliases': [wbu.passwd.pw_name],
        'full_name': get(entry, 'name'),
        'email': None,
        'locked': False,
        'sudo': False,
        'password_disabled': False,
        'group': str(self.domain_users_guid),
        'groups': groups,
        'shell': wbu.passwd.pw_shell,
        'home': wbu.passwd.pw_dir
    }
def extend_query():
    """Yield vmware peers with wrapped passwords and a lazily-computed status."""
    for peer in self.datastore.query_stream('peers', ('type', '=', 'vmware')):
        secret = q.get(peer, 'credentials.password')
        if secret:
            # Wrap the stored value in a Password object before handing it out.
            q.set(peer, 'credentials.password', Password(secret))
        peer['status'] = lazy(self.get_status, peer['id'])
        yield peer
def run(self, context, args, kwargs, opargs):
    """Display the parent task's details as a structured object."""
    task = self.parent.entity
    rows = [
        ('ID', 'id', task['id']),
        ('Name', 'name', task['name']),
        ('Description', 'description', get(task, 'description.message')),
        ('Object name', 'object', get(task, 'description.name')),
        ('State', 'state', task['state']),
        ('Started at', 'started_at', task['started_at']),
        ('Started by', 'started_by', task['user']),
        ('Resources assigned', 'resources', task['resources']),
        ('Warnings', 'warnings', task['warnings']),
        ('Error', 'error', task['error']),
        ('Arguments', 'arguments', task['args']),
        ('Result', 'result', task['result']),
        ('Output', 'output', task['output']),
        ('Resource usage', 'rusage', task['rusage']),
    ]
    return Object(*(Object.Item(label, name, value) for label, name, value in rows))
def attr_query(self):
    """Exercise q.set/get/contains/delete/query on both objects and dicts."""
    class Probe(object):
        pass

    obj = Probe()
    mapping = {}
    q.set(obj, 'f', True)
    q.set(mapping, 'f2', Probe())
    q.set(mapping, 'f2.nested', True)

    nested_ok = q.get(obj, 'f') and q.get(mapping, 'f2.nested')
    if nested_ok and isinstance(q.get(mapping, 'f2'), Probe):
        items = [mapping, obj]
        if q.contains(obj, 'f'):
            q.delete(obj, 'f')
        return bool(q.query(items, ('f2.nested', '=', True), count=True))
    return False
def wait(self):
    """Block until the task reaches a terminal state; raise on failure."""
    terminal_states = ('FINISHED', 'FAILED', 'ABORTED')
    self.task = self.subscriber.wait_for(
        self.tid,
        lambda t: t['state'] in terminal_states
    )
    if self.task['state'] != 'FINISHED':
        message = get(self.task, 'error.message')
        raise RuntimeError('Task {0} failed: {1}'.format(self.tid, message))
    return self.result or self.task['result']
def made_of_good_disks(self):
    """Return False when any boot-pool disk sits on the umass-sim (USB) controller."""
    pool = self.dispatcher.call_sync('boot.pool.query')
    disks = self.dispatcher.call_sync('disk.query', [('id', 'in', pool['disks'])])
    return all(
        q.get(disk, 'controller.controller_name') != 'umass-sim'
        for disk in disks
    )
def make_snapshot_entry(action):
    """Build a manifest entry describing the snapshot referenced by *action*."""
    snapname = '{0}@{1}'.format(action['localfs'], action['snapshot'])
    snap = self.dispatcher.call_sync(
        'zfs.snapshot.query',
        [('id', '=', snapname)],
        {'single': True}
    )

    creation = int(q.get(snap, 'properties.creation.rawvalue'))
    txg_raw = q.get(snap, 'properties.createtxg.rawvalue')
    snap_uuid = q.get(snap, 'properties.org\\.freenas:uuid.value')

    return {
        'name': snapname,
        'anchor': action.get('anchor'),
        'incremental': action['incremental'],
        'created_at': creation,
        'uuid': snap_uuid,
        'txg': int(txg_raw) if txg_raw else None,
        # Stable on-disk filename derived from the snapshot name.
        'filename': hashlib.md5(snapname.encode('utf-8')).hexdigest()
    }
def get_enclosure(disk):
    """Format the description/name of the enclosure housing *disk*, if any."""
    enclosure_id = q.get(disk, 'status.enclosure')
    if not enclosure_id:
        return
    enclosure = context.entity_subscribers['disk.enclosure'].get(enclosure_id)
    if enclosure:
        return '{0} ({1})'.format(enclosure['description'], enclosure['name'])
def set_related_enabled(name, enabled):
    """Enable/disable shares related to pool *name*, unless the pool is read-only."""
    pool_properties = dispatcher.call_sync(
        'zfs.pool.query',
        [('name', '=', name)],
        {'single': True, 'select': 'properties'}
    )
    if q.get(pool_properties, 'readonly.parsed'):
        return

    path = dispatcher.call_sync('volume.resolve_path', name, '')
    dispatcher.call_task_sync('share.update_related', path, {'enabled': enabled})
    dispatcher.call_task_sync(
        'share.update_related',
        os.path.join('/dev/zvol', name),
        {'enabled': enabled}
    )
def attr_query(self):
    """Smoke-test q.* helpers against an object attribute and a nested dict."""
    class Holder(object):
        pass

    holder = Holder()
    data = {}
    q.set(holder, 'f', True)
    q.set(data, 'f2', Holder())
    q.set(data, 'f2.nested', True)

    if not (q.get(holder, 'f') and q.get(data, 'f2.nested')):
        return False
    if not isinstance(q.get(data, 'f2'), Holder):
        return False

    collection = [data, holder]
    if q.contains(holder, 'f'):
        q.delete(holder, 'f')
    return bool(q.query(collection, ('f2.nested', '=', True), count=True))
def listen(self, id):
    """Yield change events for entity *id*, starting with a synthetic create."""
    channel = Queue()
    self.listeners.setdefault(id, []).append(channel)
    try:
        current = self.get(id, viewport=True)
        yield ('create', current, current)
        while True:
            yield channel.get()
    finally:
        # Always detach the listener, even if the consumer stops iterating.
        self.listeners[id].remove(channel)
def delete(self, this, kwargs):
    """Remove *this* entity from the parent's nested collection and save the parent."""
    pk = self.primary_key_name
    remaining = [
        item for item in q.get(self.parent.entity, self.parent_path)
        if item[pk] != this.entity[pk]
    ]
    q.set(self.parent.entity, self.parent_path, remaining)
    return self.parent.save()
def run(self, directory):
    """Create a directory-service entry: normalize params, unwrap passwords,
    enable SMB for winbind, then persist and reconfigure dscached."""
    try:
        params = self.dispatcher.call_sync(
            'dscached.management.normalize_parameters',
            directory['type'],
            directory.get('parameters', {}))
    except RpcException as err:
        raise TaskException(err.code, err.message)

    if self.datastore.exists('directories', ('name', '=', directory['name'])):
        raise TaskException(
            errno.EEXIST,
            'Directory {0} already exists'.format(directory['name']))

    normalize(directory, {
        'enabled': False,
        'enumerate': True,
        'immutable': False,
        'uid_range': None,
        'gid_range': None
    })

    # Replace passed in params with normalized ones
    directory['parameters'] = params
    for key, val in directory['parameters'].items():
        if key == 'password':
            directory['parameters'][key] = unpassword(val)

    if directory['type'] == 'winbind':
        normalize(directory, {
            'uid_range': [100000, 999999],
            'gid_range': [100000, 999999]
        })
        smb = self.dispatcher.call_sync('service.query', [('name', '=', 'smb')], {"single": True})
        if not q.get(smb, 'config.enable'):
            # winbind needs the SMB service running.
            q.set(smb, 'config.enable', True)
            self.run_subtask_sync('service.update', smb['id'], smb)

    self.id = self.datastore.insert('directories', directory)
    self.dispatcher.call_sync('dscached.management.configure_directory', self.id)
    self.dispatcher.dispatch_event('directory.changed', {
        'operation': 'create',
        'ids': [self.id]
    })

    node = ConfigNode('directory', self.configstore)
    node['search_order'] = node['search_order'].value + [directory['name']]
    self.dispatcher.call_sync('dscached.management.reload_config')
    return self.id
def delete(self, this, kwargs):
    """Drop *this* entity from the parent's nested list, then persist the parent."""
    def keep(item):
        # Retain every entity except the one matching this entity's primary key.
        return item[self.primary_key_name] != this.entity[self.primary_key_name]

    current = q.get(self.parent.entity, self.parent_path)
    q.set(self.parent.entity, self.parent_path, list(filter(keep, current)))
    return self.parent.save()
def run(self, disk):
    """Detach *disk*'s boot partition vdev from the boot pool."""
    boot_pool_name = self.configstore.get('system.boot_pool_name')
    pool = self.dispatcher.call_sync('zfs.pool.get_boot_pool')
    expected_path = os.path.join('/dev', disk + 'p2')
    vdev = first_or_default(
        lambda v: v['path'] == expected_path,
        q.get(pool, 'groups.data.0.children')
    )
    if not vdev:
        raise TaskException(errno.ENOENT, 'Disk {0} not found in the boot pool'.format(disk))
    self.run_subtask_sync('zfs.pool.detach', boot_pool_name, vdev['guid'])
def convert_bootenv(boot_pool, ds):
    """Translate a <pool>/ROOT/<name> dataset into a boot-environment record, or None."""
    root_mount = dispatcher.threaded(bsd.statfs, '/')
    parts = ds['id'].split('/')
    if len(parts) != 3 or parts[:2] != [boot_pool['id'], 'ROOT']:
        return

    realname = parts[-1]
    return {
        'active': root_mount.source == ds['id'],
        'keep': q.get(ds, 'properties.beadm:keep.value') not in ('no', 'off', 'False'),
        'on_reboot': q.get(boot_pool, 'properties.bootfs.value') == ds['id'],
        'id': q.get(ds, 'properties.beadm:nickname.value', realname),
        'space': q.get(ds, 'properties.used.parsed'),
        'realname': realname,
        'mountpoint': ds.get('mountpoint'),
        'created': datetime.fromtimestamp(int(q.get(ds, 'properties.creation.rawvalue')))
    }
def run(self, peer, initial_credentials):
    """Create a peer entry, unwrapping the stored password first."""
    if 'name' not in peer:
        raise TaskException(errno.EINVAL, 'Name has to be specified')
    if self.datastore.exists('peers', ('name', '=', peer['name'])):
        raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

    secret = q.get(peer, 'credentials.password')
    if secret:
        q.set(peer, 'credentials.password', unpassword(secret))
    return self.datastore.insert('peers', peer)
def run(self, peer, initial_credentials):
    """Create an SSH peer entry with normalized credential defaults.

    Fix: the password unwrap used ``q.get`` with a default argument, which is
    a read and silently did nothing; it must be ``q.set`` to store the secret.
    """
    if 'name' not in peer:
        raise TaskException(errno.EINVAL, 'Name has to be specified')

    normalize(peer['credentials'], {
        'port': 22,
        'password': None,
        'privkey': None,
        'hostkey': None
    })

    password = q.get(peer, 'credentials.password')
    if password:
        # BUG FIX: was q.get(...), which returned the value instead of storing it.
        q.set(peer, 'credentials.password', password.secret)

    if self.datastore.exists('peers', ('name', '=', peer['name'])):
        raise TaskException(
            errno.EINVAL,
            'Peer entry {0} already exists'.format(peer['name']))

    return self.datastore.insert('peers', peer)
def find_dumps(dispatcher):
    """Scan online disks' swap partitions for kernel crash dumps and save them."""
    logger.warning('Finding and saving crash dumps')
    disks = dispatcher.call_sync(
        'disk.query',
        [('id', 'in', get_available_disks(dispatcher)), ('online', '=', True)]
    )
    for disk in disks:
        swap_path = q.get(disk, 'status.swap_partition_path')
        if not swap_path:
            continue
        try:
            system('/sbin/savecore', '/data/crash', swap_path)
        except SubprocessException:
            # Best effort: a failed savecore on one disk shouldn't stop the scan.
            continue
def run(self, id, updated_fields):
    """Apply *updated_fields* to a peer entry, unwrapping any new password."""
    peer = self.datastore.get_by_id('peers', id)
    if not peer:
        raise TaskException(errno.ENOENT, 'Peer {0} does not exist'.format(id))

    password = q.get(updated_fields, 'credentials.password')
    if password:
        q.set(updated_fields, 'credentials.password', unpassword(password))

    peer.update(updated_fields)

    # NOTE(review): this uniqueness check runs after the merge and does not
    # exclude the peer's own record — "renaming" a peer to its current name
    # would trip it; confirm whether that is intended.
    if 'name' in updated_fields and self.datastore.exists('peers', ('name', '=', peer['name'])):
        raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

    self.datastore.update('peers', id, peer)
def run(self, directory):
    """Create and activate a new directory-service configuration entry."""
    try:
        params = self.dispatcher.call_sync(
            'dscached.management.normalize_parameters',
            directory['type'],
            directory.get('parameters', {})
        )
    except RpcException as err:
        raise TaskException(err.code, err.message)

    name = directory['name']
    if self.datastore.exists('directories', ('name', '=', name)):
        raise TaskException(errno.EEXIST, 'Directory {0} already exists'.format(name))

    defaults = {
        'enabled': False,
        'enumerate': True,
        'immutable': False,
        'uid_range': None,
        'gid_range': None
    }
    normalize(directory, defaults)

    # Replace passed in params with normalized ones
    directory['parameters'] = params
    for param_name, param_value in directory['parameters'].items():
        if param_name == 'password':
            directory['parameters'][param_name] = unpassword(param_value)

    if directory['type'] == 'winbind':
        normalize(directory, {
            'uid_range': [100000, 999999],
            'gid_range': [100000, 999999]
        })
        smb = self.dispatcher.call_sync('service.query', [('name', '=', 'smb')], {"single": True})
        if not q.get(smb, 'config.enable'):
            # winbind requires a running SMB service.
            q.set(smb, 'config.enable', True)
            self.run_subtask_sync('service.update', smb['id'], smb)

    self.id = self.datastore.insert('directories', directory)
    self.dispatcher.call_sync('dscached.management.configure_directory', self.id)
    self.dispatcher.dispatch_event('directory.changed', {
        'operation': 'create',
        'ids': [self.id]
    })

    node = ConfigNode('directory', self.configstore)
    node['search_order'] = node['search_order'].value + [name]
    self.dispatcher.call_sync('dscached.management.reload_config')
    return self.id
def __getitem__(self, item):
    """Serve cloud-init metadata for this VM; an empty key lists the available keys."""
    if not item:
        return '\n'.join(['instance-id', 'local-hostname'])
    if item == 'instance-id':
        result = str(self.vm['id'])
    elif item == 'local-hostname':
        result = self.vm['name']
    elif item == 'user-data':
        result = get(self.vm, 'config.cloud_init') or ''
    else:
        result = ''
    return result
def get_status(self, id):
    """Probe a vmware peer's reachability and round-trip time."""
    si = None
    peer = self.datastore.get_by_id('peers', id)
    if peer['type'] != 'vmware':
        raise RpcException(errno.EINVAL, 'Invalid peer type: {0}'.format(peer['type']))

    try:
        start_time = datetime.now()
        # Certificate verification is intentionally disabled for this probe.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_NONE
        si = connect.SmartConnect(
            host=q.get(peer, 'credentials.address'),
            user=q.get(peer, 'credentials.username'),
            pwd=q.get(peer, 'credentials.password'),
            sslContext=ssl_context
        )
        delta = datetime.now() - start_time
    except Exception:
        # FIX: was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate.
        return {'state': 'OFFLINE', 'rtt': None}
    finally:
        if si:
            connect.Disconnect(si)

    return {'state': 'ONLINE', 'rtt': delta.total_seconds()}
def __getitem__(self, item):
    """Resolve a cloud-init metadata key; an empty key returns the key index."""
    if not item:
        return "\n".join(["instance-id", "local-hostname"])
    if item == "instance-id":
        value = str(self.vm["id"])
    elif item == "local-hostname":
        value = self.vm["name"]
    elif item == "user-data":
        value = get(self.vm, "config.cloud_init") or ""
    else:
        value = ""
    return value
def complete(self, context, **kwargs):
    """Return tab-completion hints for the support-ticket command."""
    props = []
    username = q.get(kwargs, 'kwargs.username')
    password = q.get(kwargs, 'kwargs.password')
    if username and password:
        if not self.ticket_categories:
            # Category list requires valid support credentials; cache once fetched.
            self.ticket_categories.update(context.call_sync(
                'support.categories', str(username), Password(str(password))
            ))

    if self.ticket_categories:
        props += [EnumComplete('category=', list(self.ticket_categories.keys()))]

    props += [NullComplete('subject=')]
    props += [NullComplete('description=')]
    props += [EnumComplete('type=', ['bug', 'feature'])]
    props += [EnumComplete('attach_debug_data=', ['yes', 'no'])]
    props += [NullComplete('attachments=')]

    # FIX: the original tail contained a corrupted (scrubbed) literal
    # "'username='******'password='", which is not valid Python; restore the
    # two separate completion entries it evidently stood for.
    return props + [
        NullComplete('username='),
        NullComplete('password='),
    ]
def get_status(self, id):
    """Measure connectivity and RTT to a vmware peer; OFFLINE on any connect failure."""
    si = None
    peer = self.datastore.get_by_id('peers', id)
    if peer['type'] != 'vmware':
        raise RpcException(errno.EINVAL, 'Invalid peer type: {0}'.format(peer['type']))

    try:
        start_time = datetime.now()
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ssl_context.verify_mode = ssl.CERT_NONE  # probe does not validate peer certs
        si = connect.SmartConnect(
            host=q.get(peer, 'credentials.address'),
            user=q.get(peer, 'credentials.username'),
            pwd=q.get(peer, 'credentials.password'),
            sslContext=ssl_context
        )
        delta = datetime.now() - start_time
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.
        return {'state': 'OFFLINE', 'rtt': None}
    finally:
        if si:
            connect.Disconnect(si)

    return {'state': 'ONLINE', 'rtt': delta.total_seconds()}
def do_append(self, obj, value):
    """Append parsed *value* items to this set/array property on *obj*."""
    if self.type not in (ValueType.SET, ValueType.ARRAY):
        raise ValueError('Property is not a set or array')

    value = read_value(value, self.type)
    oldvalues = q.get(obj, self.set)
    if oldvalues is not None:
        newvalues = oldvalues + list(value)
    else:
        newvalues = value

    if callable(self.set):
        # FIX: collections.Callable was removed in Python 3.10; callable() is
        # the portable check (self.set is either a setter or an attribute path).
        self.set(obj, newvalues)
        return

    return q.set(obj, self.set, newvalues)
def do_remove(self, obj, value):
    """Remove parsed *value* items from this set/array property on *obj*.

    Raises CommandException when an item is not present in the current value.
    """
    if self.type not in (ValueType.SET, ValueType.ARRAY):
        raise ValueError('Property is not a set or array')

    value = read_value(value, self.type)
    oldvalues = q.get(obj, self.set)
    newvalues = oldvalues
    for v in value:
        if v in newvalues:
            newvalues.remove(v)
        else:
            raise CommandException(_('{0} is not a value in {1}'.format(v, self.set)))

    if callable(self.set):
        # FIX: collections.Callable was removed in Python 3.10; callable() is
        # the portable check (self.set is either a setter or an attribute path).
        self.set(obj, newvalues)
        return

    return q.set(obj, self.set, newvalues)