def init_datastore(self):
    try:
        self.datastore = datastore.get_datastore()
    except datastore.DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)

    self.configstore = ConfigStore(self.datastore)
def main(self):
    if len(sys.argv) != 2:
        print("Invalid number of arguments", file=sys.stderr)
        sys.exit(errno.EINVAL)

    key = sys.argv[1]
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

    try:
        self.datastore = get_default_datastore()
        self.conn = Client()
        self.conn.connect('127.0.0.1')
        self.conn.login_service('task.{0}'.format(os.getpid()))
        self.conn.enable_server()
        self.conn.rpc.register_service_instance('taskproxy', self.service)
        task = self.conn.call_sync('task.checkin', key)
        module = imp.load_source('plugin', task['filename'])
        setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

        try:
            self.instance = getattr(module, task['class'])(DispatcherWrapper(self.conn), self.datastore)
            self.instance.configstore = ConfigStore(self.datastore)
            self.running.set()
            result = self.instance.run(*task['args'])
        except BaseException as err:
            print("Task exception: {0}".format(str(err)), file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            self.put_status('FAILED', exception=err)
        else:
            self.put_status('FINISHED', result=result)
def init_datastore(self):
    try:
        self.datastore = get_datastore(self.config)
        self.datastore_log = get_datastore(self.config, log=True)
    except DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)

    self.configstore = ConfigStore(self.datastore)
def __init__(self, dispatcher, datastore):
    self.dispatcher = dispatcher
    self.datastore = datastore
    self.configstore = ConfigStore(datastore)
    self.logger = logging.getLogger(self.__class__.__name__)
    self.subtasks = []
    self.progress_callbacks = {}
    self.user = None
    self.environment = {}
    self.do_abort = False
    self.rlock = RLock()
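For context, a minimal sketch of a task class that fits this constructor contract: the executor code elsewhere in this section instantiates the class with (dispatcher, datastore), injects configstore afterwards, and then calls run(*task['args']). Class, argument, and return-value names here are hypothetical, not taken from the source.

import logging


class ExampleEchoTask(object):
    """Hypothetical task plugin; mirrors the (dispatcher, datastore) signature above."""

    def __init__(self, dispatcher, datastore):
        self.dispatcher = dispatcher
        self.datastore = datastore
        self.configstore = None  # set by the executor after construction
        self.logger = logging.getLogger(self.__class__.__name__)

    def run(self, message):
        # Whatever run() returns is what the executor reports as the FINISHED result.
        self.logger.debug('echoing %r', message)
        return {'echo': message}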
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_default_datastore()
    cs = ConfigStore(ds)

    lldp = orm['services.LLDP'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='lldp')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.lldp.enable', svc.srv_enable)

    cs.set('service.lldp.save_description', lldp.lldp_intdesc)
    cs.set('service.lldp.country_code', lldp.lldp_country or None)
    cs.set('service.lldp.location', lldp.lldp_location or None)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    iscsi = orm['services.iSCSITargetGlobalConfiguration'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='iscsi')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.iscsi.enable', svc.srv_enable)

    cs.set('service.iscsi.base_name', iscsi.iscsi_basename)
    cs.set('service.iscsi.isns_servers', iscsi.iscsi_isns_servers)
    cs.set('service.iscsi.pool_space_threshold', iscsi.iscsi_pool_avail_threshold)

    # iSCSI Portals
    iscsi_portals = orm['services.iSCSITargetPortal'].objects.all()
    for p in iscsi_portals:
        ds.insert('iscsi.portals', {
            'id': 'pg{0}'.format(p.id),
            'tag': p.iscsi_target_portal_tag,
            'description': p.iscsi_target_portal_comment,
            'discovery_auth_group': 'ag{0}'.format(p.iscsi_target_portal_discoveryauthgroup),
            'listen': [
                {'address': i.iscsi_target_portalip_ip, 'port': i.iscsi_target_portalip_port}
                for i in p.ips.all()
            ]
        })

    # iSCSI Targets
    iscsi_targets = orm['services.iSCSITarget'].objects.all()
    for t in iscsi_targets:
        ds.insert('iscsi.targets', {
            'id': t.iscsi_target_name,
            'portal_group': 'default',  # XXX: Needs to pass proper portal group
            'auth_group': 'no-authentication',  # XXX: Needs to pass proper auth group
            'description': t.iscsi_target_alias,
            'extents': []
        })
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_default_datastore()
    cs = ConfigStore(ds)

    rsyncd = orm['services.Rsyncd'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='rsync')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.rsyncd.enable', svc.srv_enable)

    cs.set('service.rsyncd.port', rsyncd.rsyncd_port)
    cs.set('service.rsyncd.auxiliary', rsyncd.rsyncd_auxiliary)

    for rmod in orm['services.RsyncMod'].objects.all():
        # Map the legacy access mode (ro/wo/rw) onto the new enum. The original
        # compared rsyncmod_path here; rsyncmod_mode is assumed to be the intended field.
        if rmod.rsyncmod_mode == 'ro':
            mode = 'READONLY'
        elif rmod.rsyncmod_mode == 'wo':
            mode = 'WRITEONLY'
        else:
            mode = 'READWRITE'

        ds.insert('rsyncd-module', {
            'name': rmod.rsyncmod_name,
            'description': rmod.rsyncmod_comment or None,
            'path': rmod.rsyncmod_path,
            'mode': mode,
            'max_connections': rmod.rsyncmod_maxconn or None,
            'user': rmod.rsyncmod_user,
            'group': rmod.rsyncmod_group,
            'hosts_allow': rmod.rsyncmod_hostsallow or None,
            'hosts_deny': rmod.rsyncmod_hostsdeny or None,
            'auxiliary': rmod.rsyncmod_auxiliary or None,
        })
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_default_datastore()
    cs = ConfigStore(ds)

    tftp = orm['services.TFTP'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='tftp')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.tftp.enable', svc.srv_enable)

    cs.set('service.tftp.path', tftp.tftp_directory or None)
    cs.set('service.tftp.allow_new_files', tftp.tftp_newfiles)
    cs.set('service.tftp.port', tftp.tftp_port)
    cs.set('service.tftp.username', tftp.tftp_username)
    cs.set('service.tftp.umask', tftp.tftp_umask)
    cs.set('service.tftp.auxiliary', tftp.tftp_options or None)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    webdav = orm['services.WebDAV'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='webdav')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.webdav.enable', svc.srv_enable)

    if webdav.webdav_protocol == 'http':
        protocol = ['HTTP']
    elif webdav.webdav_protocol == 'https':
        protocol = ['HTTPS']
    else:
        protocol = ['HTTP', 'HTTPS']

    cs.set('service.webdav.protocol', protocol)
    cs.set('service.webdav.http_port', webdav.webdav_tcpport or 8080)
    cs.set('service.webdav.https_port', webdav.webdav_tcpportssl or 8081)

    try:
        if webdav.webdav_password:
            from freenasUI.middleware.notifier import notifier
            cs.set('service.webdav.password', notifier().pwenc_decrypt(webdav.webdav_password))
    except:
        pass

    cs.set('service.webdav.authentication', webdav.webdav_htauth.upper())
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    afp = orm['services.AFP'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='afp')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.afp.enable', svc.srv_enable)

    cs.set('service.afp.guest_enable', afp.afp_srv_guest)
    if afp.afp_srv_guest_user:
        cs.set('service.afp.guest_user', afp.afp_srv_guest_user)
    if afp.afp_srv_bindip:
        cs.set('service.afp.bind_addresses', afp.afp_srv_bindip)
    cs.set('service.afp.connections_limit', afp.afp_srv_connections_limit)
    cs.set('service.afp.homedir_enable', afp.afp_srv_homedir_enable)
    if afp.afp_srv_homedir:
        cs.set('service.afp.homedir_path', afp.afp_srv_homedir)
    if afp.afp_srv_homename:
        cs.set('service.afp.homedir_name', afp.afp_srv_homename)
    if afp.afp_srv_dbpath:
        cs.set('service.afp.dbpath', afp.afp_srv_dbpath)
    if afp.afp_srv_global_aux:
        cs.set('service.afp.auxiliary', afp.afp_srv_global_aux)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    email = orm['system.Email'].objects.order_by('-id')[0]
    cs.set('mail.from', email.em_fromemail)
    cs.set('mail.server', email.em_outgoingserver)
    cs.set('mail.port', email.em_port)

    encryption = 'PLAIN'
    if email.em_security in ('ssl', 'tls'):
        encryption = email.em_security.upper()
    cs.set('mail.encryption', encryption)

    cs.set('mail.auth', email.em_smtp)
    cs.set('mail.user', email.em_user)
    cs.set('mail.pass', email.em_pass)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    settings = orm['system.Settings'].objects.order_by('-id')[0]
    cs.set('system.language', settings.stg_language)
    cs.set('system.timezone', settings.stg_timezone)
    cs.set('system.console.keymap', settings.stg_kbdmap)
    cs.set('system.syslog_server', settings.stg_syslogserver)

    listen = []
    if settings.stg_guiaddress:
        listen.append(settings.stg_guiaddress)
    if settings.stg_guiv6address:
        listen.append('[{0}]'.format(settings.stg_guiv6address))

    cs.set('service.nginx.http.enable', settings.stg_guiprotocol in ('http', 'httphttps'))
    cs.set('service.nginx.https.enable', settings.stg_guiprotocol in ('https', 'httphttps'))
    cs.set('service.nginx.listen', listen)
    cs.set('service.nginx.http.port', settings.stg_guiport)
    cs.set('service.nginx.http.redirect_https', settings.stg_guihttpsredirect)
    cs.set('service.nginx.https.port', settings.stg_guihttpsport)
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None
        self.server = None
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.rpc.register_service_instance('dscached.account', AccountService(self))
        self.rpc.register_service_instance('dscached.group', GroupService(self))
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_enabled_directories(self):
        return list(filter(None, (self.get_directory_by_name(n) for n in self.get_search_order())))

    def get_search_order(self):
        return ['local', 'system'] + self.search_order

    def get_directory_by_domain(self, domain_name):
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.start(address)
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = imp.load_source(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        self.client.register_schema(name, schema)

    def init_directories(self):
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                directory.configure()
                self.directories.append(directory)
            except BaseException as err:
                continue

    def load_config(self):
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('/var/log/dscached.log', 'DEBUG')
        setproctitle.setproctitle('dscached')

        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.init_directories()
        self.client.wait_forever()
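The plugin loading above only requires a module-level _init(context) hook, which typically registers a class via register_plugin(). A minimal sketch of what such a plugin file could look like; the module name, class name, and plugin_type attribute are hypothetical, not taken from the source.

# demo_plugin.py -- hypothetical contents of a file dropped into one of plugin_dirs
class DemoDirectoryPlugin(object):
    plugin_type = 'demo'

    def __init__(self, context):
        self.context = context


def _init(context):
    # scan_plugin_dir() calls _init(self) on every loaded module; registering the
    # class makes it available afterwards as context.plugins['demo'].
    context.register_plugin('demo', DemoDirectoryPlugin)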
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    adv = orm['system.Advanced'].objects.order_by('-id')[0]
    cs.set('system.console.cli', adv.adv_consolemenu)
    cs.set('system.console.screensaver', adv.adv_consolescreensaver)
    cs.set('system.serial.console', adv.adv_serialconsole)
    cs.set('system.serial.port', adv.adv_serialport)
    cs.set('system.serial.speed', int(adv.adv_serialspeed))
    cs.set('service.powerd.enable', adv.adv_powerdaemon)
    cs.set('system.swapondrive', adv.adv_swapondrive)
    cs.set('system.autotune', adv.adv_autotune)
    cs.set('system.debug.kernel', adv.adv_debugkernel)
    cs.set('system.upload_crash', adv.adv_uploadcrash)
    cs.set('system.motd', adv.adv_motd)
    cs.set('system.boot_scrub_internal', adv.adv_boot_scrub)

    user = ds.query('users', ('username', '=', adv.adv_periodic_notifyuser), single=True)
    if user:
        cs.set('system.periodic.notify_user', user['id'])

    root = ds.query('users', ('uid', '=', 0), single=True)
    if root:
        root['attributes'].update({
            'gui_messages_footer': adv.adv_consolemsg,
            'gui_traceback': adv.adv_traceback,
            'gui_advancedmode': adv.adv_advancedmode,
        })
        ds.update('users', root['id'], root)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if "FREENAS_INSTALL" in os.environ:
        return

    ds = get_default_datastore()
    cs = ConfigStore(ds)

    nfs = orm["services.NFS"].objects.all()[0]
    svc = orm["services.services"].objects.filter(srv_service="nfs")
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set("service.nfs.enable", svc.srv_enable)

    cs.set("service.nfs.servers", nfs.nfs_srv_servers)
    cs.set("service.nfs.udp", nfs.nfs_srv_udp)
    cs.set("service.nfs.nonroot", nfs.nfs_srv_allow_nonroot)
    cs.set("service.nfs.v4", nfs.nfs_srv_v4)
    cs.set("service.nfs.v4_kerberos", nfs.nfs_srv_v4_krb)
    if nfs.nfs_srv_bindip:
        cs.set("service.nfs.bind_addresses", nfs.nfs_srv_bindip.split(","))
    if nfs.nfs_srv_mountd_port:
        cs.set("service.nfs.mountd_port", nfs.nfs_srv_mountd_port)
    if nfs.nfs_srv_rpcstatd_port:
        cs.set("service.nfs.rpcstatd_port", nfs.nfs_srv_rpcstatd_port)
    if nfs.nfs_srv_rpclockd_port:
        cs.set("service.nfs.rpclockd_port", nfs.nfs_srv_rpclockd_port)
def __init__(self, dispatcher, datastore):
    self.dispatcher = dispatcher
    self.datastore = datastore
    self.configstore = ConfigStore(datastore)
    self.logger = logging.getLogger(self.__class__.__name__)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    nfs = orm['services.NFS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='nfs')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.nfs.enable', svc.srv_enable)

    cs.set('service.nfs.servers', nfs.nfs_srv_servers)
    cs.set('service.nfs.udp', nfs.nfs_srv_udp)
    cs.set('service.nfs.nonroot', nfs.nfs_srv_allow_nonroot)
    cs.set('service.nfs.v4', nfs.nfs_srv_v4)
    cs.set('service.nfs.v4_kerberos', nfs.nfs_srv_v4_krb)
    if nfs.nfs_srv_bindip:
        cs.set('service.nfs.bind_addresses', nfs.nfs_srv_bindip.split(','))
    if nfs.nfs_srv_mountd_port:
        cs.set('service.nfs.mountd_port', nfs.nfs_srv_mountd_port)
    if nfs.nfs_srv_rpcstatd_port:
        cs.set('service.nfs.rpcstatd_port', nfs.nfs_srv_rpcstatd_port)
    if nfs.nfs_srv_rpclockd_port:
        cs.set('service.nfs.rpclockd_port', nfs.nfs_srv_rpclockd_port)
def forwards(self, orm):
    ds = get_datastore()
    cs = ConfigStore(ds)

    # Migrate global network configuration
    globalconf = orm.GlobalConfiguration.objects.order_by("-id")[0]
    cs.set('system.hostname', globalconf.gc_hostname + '.' + globalconf.gc_domain)
    cs.set('network.gateway.ipv4', globalconf.gc_ipv4gateway or None)
    cs.set('network.gateway.ipv6', globalconf.gc_ipv6gateway or None)
    cs.set('network.http_proxy', globalconf.gc_httpproxy or None)
    cs.set('network.dns.addresses', list(filter(None, [
        globalconf.gc_nameserver1 or None,
        globalconf.gc_nameserver2 or None,
        globalconf.gc_nameserver3 or None,
    ])))
    cs.set('network.netwait.enable', globalconf.gc_netwait_enabled)
    cs.set('network.netwait.addresses', globalconf.gc_netwait_ip.split())

    old_hosts = []

    # Migrate hosts database
    for line in globalconf.gc_hosts.split('\n'):
        line = line.strip()
        if not line:
            continue

        ip, *names = line.split(' ')
        old_hosts.extend([{'id': name, 'addresses': [ip]} for name in names])

    ensure_unique(ds, ('network.hosts', 'id'), old_ids=[x['id'] for x in old_hosts])
    for host in old_hosts:
        ds.insert('network.hosts', host)

    # Migrate VLAN interfaces configuration
    for unit, i in enumerate(orm.VLAN.objects.all()):
        ds.insert('network.interfaces', {
            'id': 'vlan{0}'.format(unit),
            'name': None,
            'type': 'VLAN',
            'cloned': True,
            'enabled': True,
            'dhcp': None,
            'rtadv': False,
            'noipv6': False,
            'mtu': None,
            'media': None,
            'mediaopts': [],
            'aliases': [],
            'vlan': {
                'parent': i.vlan_pint,
                'tag': i.vlan_tag
            },
            'capabilities': {
                'add': [],
                'del': []
            }
        })

    # Migrate LAGG interfaces configuration
    for unit, i in enumerate(orm.LAGGInterface.objects.all()):
        ds.insert('network.interfaces', {
            'id': 'lagg{0}'.format(unit),
            'name': None,
            'type': 'LAGG',
            'cloned': True,
            'enabled': True,
            'dhcp': None,
            'rtadv': False,
            'noipv6': False,
            'mtu': None,
            'media': None,
            'mediaopts': [],
            'aliases': [],
            'lagg': {
                'protocol': LAGG_PROTOCOL_MAP[i.lagg_protocol],
                'ports': [m.int_interface for m in i.lagg_interfacemembers_set.all()]
            },
            'capabilities': {
                'add': [],
                'del': []
            }
        })

    # Migrate IP configuration
    autoconfigure = True
    for i in orm.Interfaces.objects.all():
        autoconfigure = False
        aliases = []
        iface = ds.get_by_id('network.interfaces', i.int_interface)
        if not iface:
            iface = {
                'enabled': True,
            }

        iface.update({
            'name': i.int_name,
            'dhcp': i.int_dhcp,
            'aliases': aliases
        })

        if i.int_ipv4address:
            aliases.append({
                'type': 'INET',
                'address': str(i.int_ipv4address),
                'netmask': int(i.int_v4netmaskbit)
            })

        if i.int_ipv6address:
            aliases.append({
                'type': 'INET6',
                'address': str(i.int_ipv6address),
                'netmask': int(i.int_v6netmaskbit)
            })

        for alias in i.alias_set.all():
            if alias.alias_v4address:
                aliases.append({
                    'type': 'INET',
                    'address': str(alias.alias_v4address),
                    'netmask': int(alias.alias_v4netmaskbit)
                })

            if alias.alias_v6address:
                aliases.append({
                    'type': 'INET6',
                    'address': str(alias.alias_v6address),
                    'netmask': int(alias.alias_v6netmaskbit)
                })

        m = re.search(r'mtu (\d+)', i.int_options)
        if m:
            iface['mtu'] = int(m.group(1))

        m = re.search(r'media (\w+)', i.int_options)
        if m:
            iface['media'] = m.group(1)

        m = re.search(r'mediaopt (\w+)', i.int_options)
        if m:
            opt = m.group(1)
            if opt in MEDIAOPT_MAP:
                iface['mediaopts'] = [MEDIAOPT_MAP[opt]]

        # Try to read capabilities
        for k, v in CAPABILITY_MAP.items():
            if '-{0}'.format(k) in i.int_options:
                l = iface.setdefault('capabilities', {}).setdefault('del', [])
                l += v
            elif k in i.int_options:
                l = iface.setdefault('capabilities', {}).setdefault('add', [])
                l += v

        ds.upsert('network.interfaces', i.int_interface, iface)

    # If there are no interfaces, let it autoconfigure
    cs.set('network.autoconfigure', autoconfigure)

    # Migrate static routes
    for i in orm.StaticRoute.objects.all():
        try:
            net = ipaddress.ip_network(i.sr_destination)
        except ValueError as e:
            print("Invalid network {0}: {1}".format(i.sr_destination, e))
            continue

        ds.insert('network.routes', {
            'network': str(net.network_address),
            'netmask': net.prefixlen,
            'gateway': i.sr_gateway,
            'type': 'INET'
        })

    ds.collection_record_migration('network.interfaces', 'freenas9_migration')
    ds.collection_record_migration('network.routes', 'freenas9_migration')
    ds.collection_record_migration('network.hosts', 'freenas9_migration')
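The static-route conversion above relies on ipaddress.ip_network() to split a destination such as '10.0.0.0/8' into the network address and prefix length stored in 'network.routes'. A quick standalone illustration of that standard-library call:

import ipaddress

net = ipaddress.ip_network('10.0.0.0/8')
print(str(net.network_address), net.prefixlen)  # -> 10.0.0.0 8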
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    snmp = orm['services.SNMP'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='snmp')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.snmp.enable', svc.srv_enable)

    cs.set('service.snmp.location', snmp.snmp_location or None)
    cs.set('service.snmp.contact', snmp.snmp_contact or None)
    cs.set('service.snmp.community', snmp.snmp_community or 'public')
    cs.set('service.snmp.v3', snmp.snmp_v3)
    cs.set('service.snmp.v3_username', snmp.snmp_v3_username or None)
    cs.set('service.snmp.v3_password', snmp.snmp_v3_password or None)
    cs.set('service.snmp.v3_auth_type', snmp.snmp_v3_authtype or 'SHA')
    cs.set('service.snmp.v3_privacy_protocol', snmp.snmp_v3_privproto or 'AES')
    cs.set('service.snmp.v3_privacy_passphrase', snmp.snmp_v3_privpassphrase or None)
    cs.set('service.snmp.auxiliary', snmp.snmp_options or None)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    tftp = orm['services.TFTP'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='tftp')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.tftp.enable', svc.srv_enable)

    cs.set('service.tftp.path', tftp.tftp_directory or None)
    cs.set('service.tftp.allow_new_files', tftp.tftp_newfiles)
    cs.set('service.tftp.port', tftp.tftp_port)
    cs.set('service.tftp.username', tftp.tftp_username)
    cs.set('service.tftp.umask', tftp.tftp_umask)
    cs.set('service.tftp.auxiliary', tftp.tftp_options or None)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    def migrate_cert(certs):
        id_uuid_map = {}
        signedby = []
        for obj in certs:
            if obj.cert_type == 0x1:
                _type = 'CA_EXISTING'
            elif obj.cert_type == 0x2:
                _type = 'CA_INTERNAL'
            elif obj.cert_type == 0x4:
                _type = 'CA_INTERMEDIATE'
            elif obj.cert_type == 0x8:
                _type = 'CERT_EXISTING'
            elif obj.cert_type == 0x10:
                _type = 'CERT_INTERNAL'
            else:
                _type = 'CERT_CSR'

            cert = {
                'type': _type,
                'name': obj.cert_name,
                'certificate': obj.cert_certificate,
                'privatekey': obj.cert_privatekey,
                'csr': obj.cert_CSR,
                'key_length': obj.cert_key_length,
                'digest_algorithm': obj.cert_digest_algorithm,
                'lifetime': obj.cert_lifetime,
                'country': obj.cert_country,
                'state': obj.cert_state,
                'city': obj.cert_city,
                'organization': obj.cert_organization,
                'email': obj.cert_email,
                'common': obj.cert_common,
                'serial': obj.cert_serial,
            }

            pkey = ds.insert('crypto.certificates', cert)
            id_uuid_map[obj.id] = pkey
            if obj.cert_signedby is not None:
                signedby.append(obj.id)

        return id_uuid_map, signedby

    def migrate_signedby(model, id_uuid_map, signedby, ca_map):
        for id in signedby:
            cobj = model.objects.get(id=id)
            pkey = id_uuid_map.get(id)
            if pkey is None:
                continue

            cert = ds.get_by_id('crypto.certificates', pkey)
            if cobj.cert_signedby is None:
                continue

            signedby = ca_map.get(cobj.cert_signedby.id)
            if signedby is None:
                continue

            cert['signedby'] = signedby
            ds.update('crypto.certificates', pkey, cert)

    id_uuid_map, signedby = migrate_cert(orm['system.CertificateAuthority'].objects.order_by('cert_signedby'))
    migrate_signedby(orm['system.CertificateAuthority'], id_uuid_map, signedby, id_uuid_map)

    cert_id_uuid_map, cert_signedby = migrate_cert(orm['system.Certificate'].objects.order_by('cert_signedby'))
    migrate_signedby(orm['system.Certificate'], cert_id_uuid_map, cert_signedby, id_uuid_map)

    settings = orm['system.Settings'].objects.order_by('-id')[0]
    if settings.stg_guicertificate:
        uuid = cert_id_uuid_map.get(settings.stg_guicertificate.id)
        if uuid:
            cs.set('service.nginx.https.certificate', uuid)

    ds.collection_record_migration('crypto.certificates', 'freenas9_migration')
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    ssh = orm['services.SSH'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='ssh')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.sshd.enable', svc.srv_enable)

    cs.set('service.sshd.port', ssh.ssh_tcpport)
    cs.set('service.sshd.permit_root_login', ssh.ssh_rootlogin)
    cs.set('service.sshd.allow_password_auth', ssh.ssh_passwordauth)
    cs.set('service.sshd.allow_port_forwarding', ssh.ssh_tcpfwd)
    cs.set('service.sshd.compression', ssh.ssh_compression)
    cs.set('service.sshd.sftp_log_level', ssh.ssh_sftp_log_level)
    cs.set('service.sshd.sftp_log_facility', ssh.ssh_sftp_log_facility)
    cs.set('service.sshd.auxiliary', ssh.ssh_options or None)
    cs.set('service.sshd.keys.host.private', ssh.ssh_host_key)
    cs.set('service.sshd.keys.host.public', ssh.ssh_host_key_pub)
    cs.set('service.sshd.keys.dsa.private', ssh.ssh_host_dsa_key or None)
    cs.set('service.sshd.keys.dsa.public', ssh.ssh_host_dsa_key_pub or None)
    if ssh.ssh_host_dsa_key_cert_pub:
        cs.set('service.sshd.keys.dsa.certificate', ssh.ssh_host_dsa_key_cert_pub)
    cs.set('service.sshd.keys.ecdsa.private', ssh.ssh_host_ecdsa_key or None)
    cs.set('service.sshd.keys.ecdsa.public', ssh.ssh_host_ecdsa_key_pub or None)
    if ssh.ssh_host_ecdsa_key_cert_pub:
        cs.set('service.sshd.keys.ecdsa.certificate', ssh.ssh_host_ecdsa_key_cert_pub)
    cs.set('service.sshd.keys.ed25519.private', ssh.ssh_host_ed25519_key or None)
    cs.set('service.sshd.keys.ed25519.public', ssh.ssh_host_ed25519_key_pub or None)
    if ssh.ssh_host_ed25519_key_cert_pub:
        cs.set('service.sshd.keys.ed25519.certificate', ssh.ssh_host_ed25519_key_cert_pub)
    cs.set('service.sshd.keys.rsa.private', ssh.ssh_host_rsa_key or None)
    cs.set('service.sshd.keys.rsa.public', ssh.ssh_host_rsa_key_pub or None)
    if ssh.ssh_host_rsa_key_cert_pub:
        cs.set('service.sshd.keys.rsa.certificate', ssh.ssh_host_rsa_key_cert_pub)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if "FREENAS_INSTALL" in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    ftp = orm["services.FTP"].objects.all()[0]
    svc = orm["services.services"].objects.filter(srv_service="ftp")
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set("service.ftp.enable", svc.srv_enable)

    tls_options = []
    if ftp.ftp_tls_opt_allow_client_renegotiations:
        tls_options.append("ALLOW_CLIENT_RENEGOTIATIONS")
    if ftp.ftp_tls_opt_allow_dot_login:
        tls_options.append("ALLOW_DOT_LOGIN")
    if ftp.ftp_tls_opt_allow_per_user:
        tls_options.append("ALLOW_PER_USER")
    if ftp.ftp_tls_opt_common_name_required:
        tls_options.append("COMMON_NAME_REQUIRED")
    if ftp.ftp_tls_opt_enable_diags:
        tls_options.append("ENABLE_DIAGNOSTICS")
    if ftp.ftp_tls_opt_export_cert_data:
        tls_options.append("EXPORT_CERTIFICATE_DATA")
    if ftp.ftp_tls_opt_no_cert_request:
        tls_options.append("NO_CERTIFICATE_REQUEST")
    if ftp.ftp_tls_opt_no_empty_fragments:
        tls_options.append("NO_EMPTY_FRAGMENTS")
    if ftp.ftp_tls_opt_no_session_reuse_required:
        tls_options.append("NO_SESSION_REUSE_REQUIRED")
    if ftp.ftp_tls_opt_stdenvvars:
        tls_options.append("STANDARD_ENV_VARS")
    if ftp.ftp_tls_opt_dns_name_required:
        tls_options.append("DNS_NAME_REQUIRED")
    if ftp.ftp_tls_opt_ip_address_required:
        tls_options.append("IP_ADDRESS_REQUIRED")

    cs.set("service.ftp.port", ftp.ftp_port)
    cs.set("service.ftp.max_clients", ftp.ftp_clients)
    if ftp.ftp_ipconnections:
        cs.set("service.ftp.ip_connections", ftp.ftp_ipconnections)
    cs.set("service.ftp.login_attempt", ftp.ftp_loginattempt)
    cs.set("service.ftp.timeout", ftp.ftp_timeout)
    cs.set("service.ftp.root_login", ftp.ftp_rootlogin)
    cs.set("service.ftp.only_anonymous", ftp.ftp_onlyanonymous)
    if ftp.ftp_anonpath:
        cs.set("service.ftp.anonymous_path", ftp.ftp_anonpath)
    cs.set("service.ftp.only_local", ftp.ftp_onlylocal)
    cs.set("service.ftp.display_login", ftp.ftp_banner)
    cs.set("service.ftp.filemask", ftp.ftp_filemask)
    cs.set("service.ftp.dirmask", ftp.ftp_dirmask)
    cs.set("service.ftp.fxp", ftp.ftp_fxp)
    cs.set("service.ftp.resume", ftp.ftp_resume)
    cs.set("service.ftp.chroot", ftp.ftp_defaultroot)
    cs.set("service.ftp.ident", ftp.ftp_ident)
    cs.set("service.ftp.reverse_dns", ftp.ftp_reversedns)
    cs.set("service.ftp.masquerade_address", ftp.ftp_masqaddress)
    if ftp.ftp_passiveportsmin:
        cs.set("service.ftp.passive_ports_min", ftp.ftp_passiveportsmin)
    if ftp.ftp_passiveportsmax:
        cs.set("service.ftp.passive_ports_max", ftp.ftp_passiveportsmax)
    if ftp.ftp_localuserbw:
        cs.set("service.ftp.local_up_bandwidth", ftp.ftp_localuserbw)
    if ftp.ftp_localuserdlbw:
        cs.set("service.ftp.local_down_bandwidth", ftp.ftp_localuserdlbw)
    if ftp.ftp_anonuserbw:
        cs.set("service.ftp.anon_up_bandwidth", ftp.ftp_anonuserbw)
    if ftp.ftp_anonuserdlbw:
        cs.set("service.ftp.anon_down_bandwidth", ftp.ftp_anonuserdlbw)
    cs.set("service.ftp.tls", ftp.ftp_tls)
    if tls_options:
        cs.set("service.ftp.tls_options", tls_options)
    cs.set("service.ftp.auxiliary", ftp.ftp_options)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    smartd = orm['services.SMART'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='smartd')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.smartd.enable', svc.srv_enable)

    cs.set('service.smartd.interval', smartd.smart_interval)
    cs.set('service.smartd.power_mode', smartd.smart_powermode.upper())
    cs.set('service.smartd.temp_difference', smartd.smart_difference or None)
    cs.set('service.smartd.temp_informational', smartd.smart_informational or None)
    cs.set('service.smartd.temp_critical', smartd.smart_critical or None)
class Main(object):
    def __init__(self):
        self.logger = logging.getLogger('dscached')
        self.config = None
        self.datastore = None
        self.configstore = None
        self.rpc = RpcContext()
        self.rpc.streaming_enabled = True
        self.rpc.streaming_burst = 16
        self.client = None
        self.server = None
        self.plugin_dirs = []
        self.plugins = {}
        self.directories = []
        self.users_cache = TTLCacheStore()
        self.groups_cache = TTLCacheStore()
        self.hosts_cache = TTLCacheStore()
        self.cache_ttl = 7200
        self.search_order = []
        self.cache_enumerations = True
        self.cache_lookups = True
        self.home_directory_root = None
        self.account_service = AccountService(self)
        self.group_service = GroupService(self)
        self.rpc.register_service_instance('dscached.account', self.account_service)
        self.rpc.register_service_instance('dscached.group', self.group_service)
        self.rpc.register_service_instance('dscached.host', HostService(self))
        self.rpc.register_service_instance('dscached.idmap', IdmapService(self))
        self.rpc.register_service_instance('dscached.management', ManagementService(self))
        self.rpc.register_service_instance('dscached.debug', DebugService())

    def get_active_directories(self):
        return list(filter(lambda d: d and d.state == DirectoryState.BOUND, self.directories))

    def get_searched_directories(self):
        return list(filter(
            lambda d: d and d.state == DirectoryState.BOUND,
            (self.get_directory_by_name(n) for n in self.get_search_order())
        ))

    def get_search_order(self):
        return self.search_order

    def get_directory_by_domain(self, domain_name):
        return first_or_default(lambda d: d.domain_name == domain_name, self.directories)

    def get_directory_by_name(self, name):
        return first_or_default(lambda d: d.name == name, self.directories)

    def get_directory_for_id(self, uid=None, gid=None):
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories)

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories)

    def get_home_directory(self, directory, username):
        if not self.home_directory_root:
            return '/nonexistent'

        return os.path.join(self.home_directory_root, f'{username}@{directory.domain_name}')

    def wait_for_etcd(self):
        self.client.test_or_wait_for_event(
            'plugin.service_resume',
            lambda args: args['name'] == 'etcd.generation',
            lambda: 'etcd.generation' in self.client.call_sync('discovery.get_services')
        )

    def init_datastore(self):
        try:
            self.datastore = datastore.get_datastore()
        except datastore.DatastoreException as err:
            self.logger.error('Cannot initialize datastore: %s', str(err))
            sys.exit(1)

        self.configstore = ConfigStore(self.datastore)

    def init_dispatcher(self):
        def on_error(reason, **kwargs):
            if reason in (ClientError.CONNECTION_CLOSED, ClientError.LOGOUT):
                self.logger.warning('Connection to dispatcher lost')
                self.connect()

        self.client = Client()
        self.client.on_error(on_error)
        self.connect()

    def init_server(self, address):
        self.server = Server(self)
        self.server.rpc = self.rpc
        self.server.streaming = True
        self.server.start(address, transport_options={'permissions': 0o777})
        thread = Thread(target=self.server.serve_forever)
        thread.name = 'ServerThread'
        thread.daemon = True
        thread.start()

    def parse_config(self, filename):
        try:
            with open(filename, 'r') as f:
                self.config = json.load(f)
        except IOError as err:
            self.logger.error('Cannot read config file: %s', err.message)
            sys.exit(1)
        except ValueError:
            self.logger.error('Config file has unreadable format (not valid JSON)')
            sys.exit(1)

        self.plugin_dirs = self.config['dscached']['plugin-dirs']

    def connect(self):
        while True:
            try:
                self.client.connect('unix:')
                self.client.login_service('dscached')
                self.client.enable_server(self.rpc)
                self.client.resume_service('dscached.account')
                self.client.resume_service('dscached.group')
                self.client.resume_service('dscached.host')
                self.client.resume_service('dscached.idmap')
                self.client.resume_service('dscached.management')
                self.client.resume_service('dscached.debug')
                return
            except (OSError, RpcException) as err:
                self.logger.warning('Cannot connect to dispatcher: {0}, retrying in 1 second'.format(str(err)))
                time.sleep(1)

    def scan_plugins(self):
        for i in self.plugin_dirs:
            self.scan_plugin_dir(i)

    def scan_plugin_dir(self, dir):
        self.logger.debug('Scanning plugin directory %s', dir)
        for f in os.listdir(dir):
            name, ext = os.path.splitext(os.path.basename(f))
            if ext != '.py':
                continue

            try:
                plugin = load_module_from_file(name, os.path.join(dir, f))
                plugin._init(self)
            except:
                self.logger.error('Cannot initialize plugin {0}'.format(f), exc_info=True)

    def register_plugin(self, name, cls):
        self.plugins[name] = cls
        self.logger.info('Registered plugin {0} (class {1})'.format(name, cls))

    def register_schema(self, name, schema):
        self.client.register_schema(name, schema)

    def register_schemas(self):
        from freenas.dispatcher.model import context
        for name, schema in (s.__named_json_schema__() for s in context.local_json_schema_objects):
            self.logger.debug(f'Registering schema: {name}')
            self.client.register_schema(name, schema)

    def init_directories(self):
        for i in self.datastore.query('directories'):
            try:
                directory = Directory(self, i)
                self.directories.append(directory)
                directory.configure()
            except:
                continue

    def load_config(self):
        self.search_order = self.configstore.get('directory.search_order')
        self.cache_ttl = self.configstore.get('directory.cache_ttl')
        self.cache_enumerations = self.configstore.get('directory.cache_enumerations')
        self.cache_lookups = self.configstore.get('directory.cache_lookups')
        self.home_directory_root = self.configstore.get('system.home_directory_root')

    def checkin(self):
        checkin()

    def main(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', metavar='CONFIG', default=DEFAULT_CONFIGFILE, help='Middleware config file')
        parser.add_argument('-s', metavar='SOCKET', default=DEFAULT_SOCKET_ADDRESS, help='Socket address to listen on')
        args = parser.parse_args()

        configure_logging('dscached', 'DEBUG')
        setproctitle('dscached')

        self.config = args.c
        self.parse_config(self.config)
        self.init_datastore()
        self.init_dispatcher()
        self.load_config()
        self.init_server(args.s)
        self.scan_plugins()
        self.register_schemas()
        self.wait_for_etcd()
        self.init_directories()
        self.checkin()
        self.client.wait_forever()
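The directory lookups above depend on first_or_default(). A minimal sketch of the assumed semantics (return the first element matching a predicate, otherwise a default); the default=None keyword is an assumption, not necessarily the exact freenas.utils implementation:

def first_or_default(predicate, iterable, default=None):
    # Return the first item for which predicate(item) is truthy, else default.
    for item in iterable:
        if predicate(item):
            return item
    return default


# e.g. first_or_default(lambda d: d.name == 'local', directories)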
def main(self):
    if len(sys.argv) != 2:
        print("Invalid number of arguments", file=sys.stderr)
        sys.exit(errno.EINVAL)

    key = sys.argv[1]
    configure_logging(None, logging.DEBUG)

    self.datastore = get_datastore()
    self.configstore = ConfigStore(self.datastore)
    self.conn = Client()
    self.conn.connect('unix:')
    self.conn.login_service('task.{0}'.format(os.getpid()))
    self.conn.enable_server()
    self.conn.call_sync('management.enable_features', ['streaming_responses'])
    self.conn.rpc.register_service_instance('taskproxy', self.service)
    self.conn.register_event_handler('task.progress', self.task_progress_handler)
    self.conn.call_sync('task.checkin', key)
    setproctitle.setproctitle('task executor (idle)')

    while True:
        try:
            task = self.task.get()
            logging.root.setLevel(self.conn.call_sync('management.get_logging_level'))
            setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))

            if task['debugger']:
                sys.path.append('/usr/local/lib/dispatcher/pydev')
                import pydevd
                host, port = task['debugger']
                pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

            name, _ = os.path.splitext(os.path.basename(task['filename']))
            module = self.module_cache.get(task['filename'])
            if not module:
                module = load_module_from_file(name, task['filename'])
                self.module_cache[task['filename']] = module

            setproctitle.setproctitle('task executor (tid {0})'.format(task['id']))
            fds = list(self.collect_fds(task['args']))

            try:
                dispatcher = DispatcherWrapper(self.conn)
                self.instance = getattr(module, task['class'])(dispatcher, self.datastore)
                self.instance.configstore = self.configstore
                self.instance.user = task['user']
                self.instance.environment = task['environment']
                self.running.set()
                self.run_task_hooks(self.instance, task, 'before')
                result = self.instance.run(*task['args'])
                self.run_task_hooks(self.instance, task, 'after', result=result)
            except BaseException as err:
                print("Task exception: {0}".format(str(err)), file=sys.stderr)
                traceback.print_exc(file=sys.stderr)

                if hasattr(self.instance, 'rollback'):
                    self.put_status('ROLLBACK')
                    try:
                        self.instance.rollback(*task['args'])
                    except BaseException as rerr:
                        print("Task exception during rollback: {0}".format(str(rerr)), file=sys.stderr)
                        traceback.print_exc(file=sys.stderr)

                # Main task is already failed at this point, so ignore hook errors
                with contextlib.suppress(RpcException):
                    self.run_task_hooks(self.instance, task, 'error', error=serialize_error(err))

                self.put_status('FAILED', exception=err)
            else:
                self.put_status('FINISHED', result=result)
            finally:
                self.close_fds(fds)
                self.running.clear()
        except RpcException as err:
            print("RPC failed: {0}".format(str(err)), file=sys.stderr)
            print(traceback.format_exc(), flush=True)
            sys.exit(errno.EBADMSG)
        except socket.error as err:
            print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
            sys.exit(errno.ETIMEDOUT)

        if task['debugger']:
            import pydevd
            pydevd.stoptrace()

        setproctitle.setproctitle('task executor (idle)')
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    ddns = orm['services.DynamicDNS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='dynamicdns')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.dyndns.enable', svc.srv_enable)

    if ddns.ddns_provider:
        cs.set('service.dyndns.provider', ddns.ddns_provider)
    if ddns.ddns_ipserver:
        cs.set('service.dyndns.ipserver', ddns.ddns_ipserver)
    if ddns.ddns_domain:
        cs.set('service.dyndns.domains', ddns.ddns_domain.split(','))
    cs.set('service.dyndns.username', ddns.ddns_username)

    try:
        pwd = notifier().pwenc_decrypt(ddns.ddns_password)
    except:
        pwd = ''
    cs.set('service.dyndns.password', pwd)

    if ddns.ddns_updateperiod:
        cs.set('service.dyndns.update_period', ddns.ddns_updateperiod)
    if ddns.ddns_fupdateperiod:
        cs.set('service.dyndns.force_update_period', ddns.ddns_fupdateperiod)
    if ddns.ddns_options:
        cs.set('service.dyndns.auxiliary', ddns.ddns_options)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    ups = orm['services.UPS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='ups')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.ups.enable', svc.srv_enable)

    cs.set('service.ups.mode', ups.ups_mode.upper())
    cs.set('service.ups.identifier', ups.ups_identifier)
    cs.set('service.ups.remote_host', ups.ups_remotehost)
    cs.set('service.ups.remote_port', ups.ups_remoteport)
    cs.set('service.ups.driver', ups.ups_driver.split('$')[0])
    cs.set('service.ups.driver_port', ups.ups_port)
    cs.set('service.ups.auxiliary', ups.ups_options or None)
    cs.set('service.ups.description', ups.ups_description or None)
    cs.set('service.ups.shutdown_mode', ups.ups_shutdown.upper())
    cs.set('service.ups.shutdown_timer', ups.ups_shutdowntimer)
    cs.set('service.ups.monitor_user', ups.ups_monuser)
    cs.set('service.ups.monitor_password', ups.ups_monpwd)
    cs.set('service.ups.auxiliary_users', ups.ups_extrausers or None)
    cs.set('service.ups.monitor_remote', ups.ups_rmonitor)
    cs.set('service.ups.email_notify', ups.ups_emailnotify)
    cs.set('service.ups.email_recipients', ups.ups_toemail or [])
    cs.set('service.ups.email_subject', ups.ups_subject)
    cs.set('service.ups.powerdown', ups.ups_powerdown)
def forwards(self, orm):
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)

    cifs = orm['services.CIFS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='cifs')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.cifs.enable', svc.srv_enable)

    cs.set('service.cifs.netbiosname', [cifs.cifs_srv_netbiosname])
    cs.set('service.cifs.workgroup', cifs.cifs_srv_workgroup)
    cs.set('service.cifs.description', cifs.cifs_srv_description)
    cs.set('service.cifs.dos_charset', cifs.cifs_srv_doscharset)
    cs.set('service.cifs.unix_charset', cifs.cifs_srv_unixcharset)

    loglevel_map = {
        '0': 'NONE',
        '1': 'MINIMUM',
        '2': 'NORMAL',
        '3': 'FULL',
        '10': 'DEBUG',
    }
    cs.set('service.cifs.log_level', loglevel_map.get(str(cifs.cifs_srv_loglevel), 'MINIMUM'))

    cs.set('service.cifs.syslog', cifs.cifs_srv_syslog)
    cs.set('service.cifs.local_master', cifs.cifs_srv_localmaster)
    cs.set('service.cifs.domain_logons', cifs.cifs_srv_domain_logons)
    cs.set('service.cifs.time_server', cifs.cifs_srv_timeserver)
    cs.set('service.cifs.guest_user', cifs.cifs_srv_guest)
    cs.set('service.cifs.filemask', cifs.cifs_srv_filemask or None)
    cs.set('service.cifs.dirmask', cifs.cifs_srv_dirmask or None)
    cs.set('service.cifs.empty_password', cifs.cifs_srv_nullpw)
    cs.set('service.cifs.unixext', cifs.cifs_srv_unixext)
    cs.set('service.cifs.zeroconf', cifs.cifs_srv_zeroconf)
    cs.set('service.cifs.hostlookup', cifs.cifs_srv_hostlookup)
    if cifs.cifs_srv_min_protocol:
        cs.set('service.cifs.min_protocol', cifs.cifs_srv_min_protocol)
    cs.set('service.cifs.max_protocol', cifs.cifs_srv_max_protocol)
    cs.set('service.cifs.execute_always', cifs.cifs_srv_allow_execute_always)
    cs.set('service.cifs.obey_pam_restrictions', cifs.cifs_srv_obey_pam_restrictions)
    if cifs.cifs_srv_bindip:
        cs.set('service.cifs.bind_addresses', cifs.cifs_srv_bindip)
    cs.set('service.cifs.sid', cifs.cifs_SID)
    cs.set('service.cifs.auxiliary', cifs.cifs_srv_smb_options)