def init_datastore(self):
    """Connect to the configuration datastore (plus a second handle used
    for log writes) and build the ConfigStore on top of it.

    Exits the process with status 1 if the datastore cannot be reached.
    """
    try:
        self.datastore = get_datastore(self.config)
        # Separate connection dedicated to log writes.
        self.datastore_log = get_datastore(self.config, log=True)
    except DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
    self.configstore = ConfigStore(self.datastore)
def forwards(self, orm):
    """Migrate 9.x disk rows from the Django ORM into the 'disks'
    datastore collection, rewriting each disk identifier on the way."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for disk in orm['storage.Disk'].objects.all():
        dev = self.identifier_to_device(disk.disk_identifier)
        if not dev:
            print("Identifier to device failed for {0}, skipping".format(disk.disk_identifier))
            continue
        newident = self.device_to_identifier(dev, serial=(disk.disk_serial or None))
        if not newident:
            print("Failed to convert {0} to id, skipping".format(dev))
            # BUG FIX: previously fell through and inserted a record with
            # id=None; actually skip, as the message says.
            continue
        ds.insert('disks', {
            'id': newident,
            'name': disk.disk_name,
            'serial': disk.disk_serial,
            'smart': disk.disk_togglesmart,
            'smart_options': disk.disk_smartoptions,
            'standby_mode': None if disk.disk_hddstandby == 'Always On' else int(disk.disk_hddstandby),
            'acoustic_level': disk.disk_acousticlevel.upper(),
            'apm_mode': None if disk.disk_advpowermgmt == 'Disabled' else int(disk.disk_advpowermgmt),
        })
        # Keep the ORM row in sync with the new identifier scheme.
        disk.disk_identifier = newident
        disk.save()
def forwards(self, orm):
    """Migrate 9.x system Settings (locale + GUI/nginx) into ConfigStore."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    # Use the most recent Settings row.
    settings = orm['system.Settings'].objects.order_by('-id')[0]
    cs.set('system.language', settings.stg_language)
    cs.set('system.timezone', settings.stg_timezone)
    cs.set('system.console.keymap', settings.stg_kbdmap)
    cs.set('system.syslog_server', settings.stg_syslogserver)
    listen = []
    if settings.stg_guiaddress:
        listen.append(settings.stg_guiaddress)
    if settings.stg_guiv6address:
        # IPv6 addresses get bracketed for the nginx listen directive.
        listen.append('[{0}]'.format(settings.stg_guiv6address))
    cs.set('service.nginx.http.enable', settings.stg_guiprotocol in ('http', 'httphttps'))
    cs.set('service.nginx.https.enable', settings.stg_guiprotocol in ('https', 'httphttps'))
    cs.set('service.nginx.listen', listen)
    cs.set('service.nginx.http.port', settings.stg_guiport)
    cs.set('service.nginx.http.redirect_https', settings.stg_guihttpsredirect)
    cs.set('service.nginx.https.port', settings.stg_guihttpsport)
def forwards(self, orm):
    """Migrate 9.x AFP service settings into ConfigStore keys."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    afp = orm['services.AFP'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='afp')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.afp.enable', svc.srv_enable)
    cs.set('service.afp.guest_enable', afp.afp_srv_guest)
    if afp.afp_srv_guest_user:
        cs.set('service.afp.guest_user', afp.afp_srv_guest_user)
    if afp.afp_srv_bindip:
        cs.set('service.afp.bind_addresses', afp.afp_srv_bindip)
    cs.set('service.afp.connections_limit', afp.afp_srv_connections_limit)
    # BUG FIX: the key previously contained a stray trailing double quote
    # ('service.afp.homedir_enable"'), so the value was written to a
    # bogus config key.
    cs.set('service.afp.homedir_enable', afp.afp_srv_homedir_enable)
    if afp.afp_srv_homedir:
        cs.set('service.afp.homedir_path', afp.afp_srv_homedir)
    if afp.afp_srv_homename:
        cs.set('service.afp.homedir_name', afp.afp_srv_homename)
    if afp.afp_srv_dbpath:
        cs.set('service.afp.dbpath', afp.afp_srv_dbpath)
    if afp.afp_srv_global_aux:
        cs.set('service.afp.auxiliary', afp.afp_srv_global_aux)
def forwards(self, orm):
    """Migrate 9.x UPS service settings into ConfigStore keys."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    ups = orm['services.UPS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='ups')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.ups.enable', svc.srv_enable)
    cs.set('service.ups.mode', ups.ups_mode.upper())
    cs.set('service.ups.identifier', ups.ups_identifier)
    cs.set('service.ups.remote_host', ups.ups_remotehost)
    cs.set('service.ups.remote_port', ups.ups_remoteport)
    # The 9.x driver field can embed extra data after a '$'; keep only
    # the leading driver name.
    cs.set('service.ups.driver', ups.ups_driver.split('$')[0])
    cs.set('service.ups.driver_port', ups.ups_port)
    # Empty strings are normalized to None for optional values.
    cs.set('service.ups.auxiliary', ups.ups_options or None)
    cs.set('service.ups.description', ups.ups_description or None)
    cs.set('service.ups.shutdown_mode', ups.ups_shutdown.upper())
    cs.set('service.ups.shutdown_timer', ups.ups_shutdowntimer)
    cs.set('service.ups.monitor_user', ups.ups_monuser)
    cs.set('service.ups.monitor_password', ups.ups_monpwd)
    cs.set('service.ups.auxiliary_users', ups.ups_extrausers or None)
    cs.set('service.ups.monitor_remote', ups.ups_rmonitor)
    cs.set('service.ups.email_notify', ups.ups_emailnotify)
    cs.set('service.ups.email_recipients', ups.ups_toemail or [])
    cs.set('service.ups.email_subject', ups.ups_subject)
    cs.set('service.ups.powerdown', ups.ups_powerdown)
def forwards(self, orm):
    """Migrate 9.x WebDAV service settings into ConfigStore keys."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    webdav = orm['services.WebDAV'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='webdav')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.webdav.enable', svc.srv_enable)

    if webdav.webdav_protocol == 'http':
        protocol = ['HTTP']
    elif webdav.webdav_protocol == 'https':
        protocol = ['HTTPS']
    else:
        # Any other value enables both protocols.
        protocol = ['HTTP', 'HTTPS']
    cs.set('service.webdav.protocol', protocol)
    cs.set('service.webdav.http_port', webdav.webdav_tcpport or 8080)
    cs.set('service.webdav.https_port', webdav.webdav_tcpportssl or 8081)
    # Best effort: a failed password decryption must not abort the
    # whole migration.
    try:
        if webdav.webdav_password:
            from freenasUI.middleware.notifier import notifier
            cs.set('service.webdav.password', notifier().pwenc_decrypt(webdav.webdav_password))
    except Exception:
        # BUG FIX: was a bare 'except:', which would also swallow
        # SystemExit/KeyboardInterrupt.
        pass
    cs.set('service.webdav.authentication', webdav.webdav_htauth.upper())
def forwards(self, orm):
    """Migrate 9.x DynamicDNS service settings into ConfigStore keys."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    ddns = orm['services.DynamicDNS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='dynamicdns')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.dyndns.enable', svc.srv_enable)
    if ddns.ddns_provider:
        cs.set('service.dyndns.provider', ddns.ddns_provider)
    if ddns.ddns_ipserver:
        cs.set('service.dyndns.ipserver', ddns.ddns_ipserver)
    if ddns.ddns_domain:
        cs.set('service.dyndns.domains', ddns.ddns_domain.split(','))
    cs.set('service.dyndns.username', ddns.ddns_username)
    # Best-effort password decryption; fall back to an empty string.
    try:
        # BUG FIX: notifier may not be imported at module level (the
        # sibling WebDAV migration imports it locally); without this
        # import the lookup raised NameError and the password was
        # always lost.
        from freenasUI.middleware.notifier import notifier
        pwd = notifier().pwenc_decrypt(ddns.ddns_password)
    except Exception:
        # BUG FIX: narrowed from a bare 'except:'.
        pwd = ''
    cs.set('service.dyndns.password', pwd)
    if ddns.ddns_updateperiod:
        cs.set('service.dyndns.update_period', ddns.ddns_updateperiod)
    if ddns.ddns_fupdateperiod:
        cs.set('service.dyndns.force_update_period', ddns.ddns_fupdateperiod)
    if ddns.ddns_options:
        cs.set('service.dyndns.auxiliary', ddns.ddns_options)
def forwards(self, orm):
    """Placeholder migration of 9.x share definitions into the 'shares'
    collection; currently disabled (early return) until the per-type
    share fields are mapped properly."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    return  # To be fixed
    # NOTE(review): everything below is intentionally unreachable until
    # the early return above is removed.
    for share in orm['sharing.CIFS_Share'].objects.all():
        ds.insert('shares', {
            'description': share.cifs_comment,
            'enabled': True,
        })
    for share in orm['sharing.AFP_Share'].objects.all():
        ds.insert('shares', {
            'enabled': True,
        })
    for share in orm['sharing.NFS_Share'].objects.all():
        ds.insert('shares', {
            'enabled': True,
        })
    for share in orm['sharing.WebDAV_Share'].objects.all():
        ds.insert('shares', {
            'enabled': True,
        })
def forwards(self, orm):
    """Migrate 9.x SNMP service settings into ConfigStore keys."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    snmp = orm['services.SNMP'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='snmp')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.snmp.enable', svc.srv_enable)
    # Empty 9.x values become None, or a sensible default where one exists.
    cs.set('service.snmp.location', snmp.snmp_location or None)
    cs.set('service.snmp.contact', snmp.snmp_contact or None)
    cs.set('service.snmp.community', snmp.snmp_community or 'public')
    cs.set('service.snmp.v3', snmp.snmp_v3)
    cs.set('service.snmp.v3_username', snmp.snmp_v3_username or None)
    cs.set('service.snmp.v3_password', snmp.snmp_v3_password or None)
    cs.set('service.snmp.v3_auth_type', snmp.snmp_v3_authtype or 'SHA')
    cs.set('service.snmp.v3_privacy_protocol', snmp.snmp_v3_privproto or 'AES')
    cs.set('service.snmp.v3_privacy_passphrase', snmp.snmp_v3_privpassphrase or None)
    cs.set('service.snmp.auxiliary', snmp.snmp_options or None)
def forwards(self, orm):
    """Migrate 9.x disk rows from the Django ORM into the 'disks'
    datastore collection, rewriting each disk identifier on the way."""
    # Skip for install time, we only care for upgrades here
    if "FREENAS_INSTALL" in os.environ:
        return

    ds = get_datastore()

    for disk in orm["storage.Disk"].objects.all():
        dev = self.identifier_to_device(disk.disk_identifier)
        if not dev:
            print("Identifier to device failed for {0}, skipping".format(disk.disk_identifier))
            continue
        newident = self.device_to_identifier(dev, serial=(disk.disk_serial or None))
        if not newident:
            print("Failed to convert {0} to id, skipping".format(dev))
            # BUG FIX: previously fell through and inserted a record with
            # id=None; actually skip, as the message says.
            continue
        ds.insert(
            "disks", {
                "id": newident,
                "name": disk.disk_name,
                "serial": disk.disk_serial,
                "smart": disk.disk_togglesmart,
                "smart_options": disk.disk_smartoptions,
                "standby_mode": None if disk.disk_hddstandby == "Always On" else int(disk.disk_hddstandby),
                "acoustic_level": disk.disk_acousticlevel.upper(),
                "apm_mode": None if disk.disk_advpowermgmt == "Disabled" else int(disk.disk_advpowermgmt),
            },
        )
        # Keep the ORM row in sync with the new identifier scheme.
        disk.disk_identifier = newident
        disk.save()
def forwards(self, orm):
    """Migrate 9.x NFS service settings into ConfigStore keys."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    nfs = orm['services.NFS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='nfs')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.nfs.enable', svc.srv_enable)
    cs.set('service.nfs.servers', nfs.nfs_srv_servers)
    cs.set('service.nfs.udp', nfs.nfs_srv_udp)
    cs.set('service.nfs.nonroot', nfs.nfs_srv_allow_nonroot)
    cs.set('service.nfs.v4', nfs.nfs_srv_v4)
    cs.set('service.nfs.v4_kerberos', nfs.nfs_srv_v4_krb)
    # Optional values: only migrated when set in 9.x.
    if nfs.nfs_srv_bindip:
        cs.set('service.nfs.bind_addresses', nfs.nfs_srv_bindip.split(','))
    if nfs.nfs_srv_mountd_port:
        cs.set('service.nfs.mountd_port', nfs.nfs_srv_mountd_port)
    if nfs.nfs_srv_rpcstatd_port:
        cs.set('service.nfs.rpcstatd_port', nfs.nfs_srv_rpcstatd_port)
    if nfs.nfs_srv_rpclockd_port:
        cs.set('service.nfs.rpclockd_port', nfs.nfs_srv_rpclockd_port)
def forwards(self, orm):
    """Migrate 9.x disk rows (with description) into the 'disks'
    datastore collection, rewriting each disk identifier on the way."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for disk in orm['storage.Disk'].objects.all():
        dev = self.identifier_to_device(disk.disk_identifier)
        if not dev:
            print("Identifier to device failed for {0}, skipping".format(disk.disk_identifier))
            continue
        newident = self.device_to_identifier(dev, serial=(disk.disk_serial or None))
        if not newident:
            print("Failed to convert {0} to id, skipping".format(dev))
            # BUG FIX: previously fell through and inserted a record with
            # id=None; actually skip, as the message says.
            continue
        ds.insert('disks', {
            'id': newident,
            'serial': disk.disk_serial,
            'description': disk.disk_description,
            'smart': disk.disk_togglesmart,
            'smart_options': disk.disk_smartoptions,
            'standby_mode': None if disk.disk_hddstandby == 'Always On' else int(disk.disk_hddstandby),
            'acoustic_level': disk.disk_acousticlevel.upper(),
            'apm_mode': None if disk.disk_advpowermgmt == 'Disabled' else int(disk.disk_advpowermgmt),
        })
        # Keep the ORM row in sync with the new identifier scheme.
        disk.disk_identifier = newident
        disk.save()
def init_datastore(self):
    """Open the default datastore and build the ConfigStore on top of it.

    Exits the process with status 1 if the datastore cannot be reached.
    """
    try:
        self.datastore = datastore.get_datastore()
    except datastore.DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
    self.configstore = ConfigStore(self.datastore)
def init_datastore(self, resume=False):
    """Connect to the datastore described by self.config and build the
    ConfigStore; exit the process with status 1 on failure.

    :param resume: accepted for interface compatibility; not used by
        this implementation.
    """
    try:
        self.datastore = get_datastore(self.config)
    except DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
    self.configstore = ConfigStore(self.datastore)
def init_datastore(self):
    """Open the datastore using the driver and DSN from self.config and
    build the ConfigStore; exit the process with status 1 on failure."""
    try:
        self.datastore = get_datastore(self.config['datastore']['driver'], self.config['datastore']['dsn'])
    except DatastoreException as err:
        self.logger.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
    self.configstore = ConfigStore(self.datastore)
def forwards(self, orm):
    """Migrate 9.x S.M.A.R.T. test schedules into schedulerd run entries."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    # 9.x single-letter test codes -> scheduler test names.
    testtype_map = {
        'L': 'LONG',
        'S': 'SHORT',
        'C': 'CONVEYANCE',
        'O': 'OFFLINE',
    }

    for smart in orm['tasks.SMARTTest'].objects.all():
        day_of_week = []
        for w in smart.smarttest_dayweek.split(','):
            try:
                day_of_week.append(WEEKDAYS[int(w) - 1])
            except (ValueError, IndexError):
                # BUG FIX: narrowed from a bare 'except:'; only skip
                # malformed or out-of-range weekday entries.
                pass
        if not day_of_week:
            day_of_week = '*'
        else:
            day_of_week = ','.join(day_of_week)
        testtype = testtype_map.get(smart.smarttest_type, 'LONG')
        disk_ids = []
        for disk in smart.smarttest_disks.all():
            if not disk.disk_identifier:
                continue
            disk_ids.append(disk.disk_identifier)
        if not disk_ids:
            # No migrated disks left for this test; drop the schedule.
            continue
        ds.insert(
            'schedulerd.runs', {
                'id': 'smarttest_{0}'.format(smart.id),
                'description': 'SMART Test {0}'.format(testtype),
                'name': 'disk.parallel_test',
                'args': [disk_ids, testtype],
                'enabled': True,
                'schedule': {
                    'year': '*',
                    'month': smart.smarttest_month,
                    'week': '*',
                    'day_of_week': day_of_week,
                    'day': smart.smarttest_daymonth,
                    'hour': smart.smarttest_hour,
                    'minute': 0,
                    'second': 0,
                }
            })
def forwards(self, orm):
    """Migrate 9.x accounts: copy non-builtin groups and all users into
    the datastore, preserving group membership by gid."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for g in orm['account.bsdGroups'].objects.filter(bsdgrp_builtin=False):
        ds.insert('groups', {
            'id': str(uuid.uuid4()),
            'gid': g.bsdgrp_gid,
            'name': g.bsdgrp_group,
            # BUG FIX: key was misspelled 'bultin'; use 'builtin' to
            # match the key written for users below.
            'builtin': False,
            'sudo': g.bsdgrp_sudo,
        })

    for u in orm['account.bsdUsers'].objects.all():
        groups = []
        for bgm in orm['account.bsdGroupMembership'].objects.filter(bsdgrpmember_user=u):
            groups.append(bgm.bsdgrpmember_group.bsdgrp_gid)

        if u.bsdusr_builtin:
            # Builtin users already exist in the datastore; only refresh
            # the mutable fields on the existing record.
            user = ds.get_one('users', ('uid', '=', u.bsdusr_uid))
            if user is None:
                continue
            user.update({
                'email': u.bsdusr_email,
                'unixhash': u.bsdusr_unixhash,
                'smbhash': u.bsdusr_smbhash,
                'groups': groups,
            })
            ds.upsert('users', user['id'], user)
            continue

        ds.insert('users', {
            'id': str(uuid.uuid4()),
            'uid': u.bsdusr_uid,
            'username': u.bsdusr_username,
            'unixhash': u.bsdusr_unixhash,
            'smbhash': u.bsdusr_smbhash,
            'group': u.bsdusr_group.bsdgrp_gid,
            'home': u.bsdusr_home,
            'shell': u.bsdusr_shell,
            'full_name': u.bsdusr_full_name,
            'builtin': False,
            'email': u.bsdusr_email,
            'password_disabled': u.bsdusr_password_disabled,
            'locked': u.bsdusr_locked,
            'sudo': u.bsdusr_sudo,
            'sshpubkey': bsdusr_sshpubkey(u),
            'groups': groups,
        })

    # Record that both collections went through the 9.x migration.
    ds.collection_record_migration('groups', 'freenas9_migration')
    ds.collection_record_migration('users', 'freenas9_migration')
def forwards(self, orm):
    """Migrate 9.x S.M.A.R.T. test schedules into schedulerd run entries."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    # 9.x single-letter test codes -> scheduler test names.
    testtype_map = {
        'L': 'LONG',
        'S': 'SHORT',
        'C': 'CONVEYANCE',
        'O': 'OFFLINE',
    }

    for smart in orm['tasks.SMARTTest'].objects.all():
        day_of_week = []
        for w in smart.smarttest_dayweek.split(','):
            try:
                day_of_week.append(WEEKDAYS[int(w) - 1])
            except (ValueError, IndexError):
                # BUG FIX: narrowed from a bare 'except:'; only skip
                # malformed or out-of-range weekday entries.
                pass
        if not day_of_week:
            day_of_week = '*'
        else:
            day_of_week = ','.join(day_of_week)
        testtype = testtype_map.get(smart.smarttest_type, 'LONG')
        disk_ids = []
        for disk in smart.smarttest_disks.all():
            if not disk.disk_identifier:
                continue
            disk_ids.append(disk.disk_identifier)
        if not disk_ids:
            # No migrated disks left for this test; drop the schedule.
            continue
        ds.insert('schedulerd.runs', {
            'id': 'smarttest_{0}'.format(smart.id),
            'description': 'SMART Test {0}'.format(testtype),
            'name': 'disk.parallel_test',
            'args': [disk_ids, testtype],
            'enabled': True,
            'schedule': {
                'year': '*',
                'month': smart.smarttest_month,
                'week': '*',
                'day_of_week': day_of_week,
                'day': smart.smarttest_daymonth,
                'hour': smart.smarttest_hour,
                'minute': 0,
                'second': 0,
            }
        })
def init_datastore(self):
    """Open the datastore (with log writes enabled) and record this boot
    in the 'boots' collection; exit the process on datastore failure."""
    try:
        self.datastore = datastore.get_datastore(log=True)
        self.datastore.insert('boots', {
            'id': self.boot_id,
            'booted_at': self.started_at,
            'hostname': socket.gethostname()
        })
    except datastore.DatastoreException as err:
        logging.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
def init_datastore(self):
    """Open the datastore (with log writes enabled) and record this boot
    in the 'boots' collection; exit the process on datastore failure."""
    try:
        self.datastore = datastore.get_datastore(log=True)
        self.datastore.insert(
            'boots', {
                'id': self.boot_id,
                'booted_at': self.started_at,
                'hostname': socket.gethostname()
            })
    except datastore.DatastoreException as err:
        logging.error('Cannot initialize datastore: %s', str(err))
        sys.exit(1)
def forwards(self, orm):
    """Migrate 9.x CIFS/SMB service settings into ConfigStore keys."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    cifs = orm['services.CIFS'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='cifs')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.cifs.enable', svc.srv_enable)
    # The new schema stores netbios names as a list.
    cs.set('service.cifs.netbiosname', [cifs.cifs_srv_netbiosname])
    cs.set('service.cifs.workgroup', cifs.cifs_srv_workgroup)
    cs.set('service.cifs.description', cifs.cifs_srv_description)
    cs.set('service.cifs.dos_charset', cifs.cifs_srv_doscharset)
    cs.set('service.cifs.unix_charset', cifs.cifs_srv_unixcharset)
    # 9.x numeric log levels -> symbolic names; default to MINIMUM.
    loglevel_map = {
        '0': 'NONE',
        '1': 'MINIMUM',
        '2': 'NORMAL',
        '3': 'FULL',
        '10': 'DEBUG',
    }
    cs.set('service.cifs.log_level', loglevel_map.get(str(cifs.cifs_srv_loglevel), 'MINIMUM'))
    cs.set('service.cifs.syslog', cifs.cifs_srv_syslog)
    cs.set('service.cifs.local_master', cifs.cifs_srv_localmaster)
    cs.set('service.cifs.domain_logons', cifs.cifs_srv_domain_logons)
    cs.set('service.cifs.time_server', cifs.cifs_srv_timeserver)
    cs.set('service.cifs.guest_user', cifs.cifs_srv_guest)
    # Empty masks are normalized to None.
    cs.set('service.cifs.filemask', cifs.cifs_srv_filemask or None)
    cs.set('service.cifs.dirmask', cifs.cifs_srv_dirmask or None)
    cs.set('service.cifs.empty_password', cifs.cifs_srv_nullpw)
    cs.set('service.cifs.unixext', cifs.cifs_srv_unixext)
    cs.set('service.cifs.zeroconf', cifs.cifs_srv_zeroconf)
    cs.set('service.cifs.hostlookup', cifs.cifs_srv_hostlookup)
    if cifs.cifs_srv_min_protocol:
        cs.set('service.cifs.min_protocol', cifs.cifs_srv_min_protocol)
    cs.set('service.cifs.max_protocol', cifs.cifs_srv_max_protocol)
    cs.set('service.cifs.execute_always', cifs.cifs_srv_allow_execute_always)
    cs.set('service.cifs.obey_pam_restrictions', cifs.cifs_srv_obey_pam_restrictions)
    if cifs.cifs_srv_bindip:
        cs.set('service.cifs.bind_addresses', cifs.cifs_srv_bindip)
    cs.set('service.cifs.sid', cifs.cifs_SID)
    cs.set('service.cifs.auxiliary', cifs.cifs_srv_smb_options)
def forwards(self, orm):
    """Copy every 9.x Volume row into the 'volumes' datastore collection."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    datastore = get_datastore()
    for volume in orm['storage.Volume'].objects.all():
        record = {
            'id': volume.vol_guid,
            'name': volume.vol_name,
            'type': 'zfs',
            'mountpoint': '/mnt/{0}'.format(volume.vol_name),
            'attributes': {},
        }
        datastore.insert('volumes', record)
def forwards(self, orm):
    """Migrate 9.x ZFS scrub schedules into schedulerd run entries."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for scrub in orm['storage.Scrub'].objects.all():
        day_of_week = []
        for w in scrub.scrub_dayweek.split(','):
            try:
                day_of_week.append(WEEKDAYS[int(w) - 1])
            except (ValueError, IndexError):
                # BUG FIX: narrowed from a bare 'except:'; only skip
                # malformed or out-of-range weekday entries.
                pass
        if not day_of_week:
            day_of_week = '*'
        else:
            day_of_week = ','.join(day_of_week)
        ds.insert(
            'schedulerd.runs', {
                'id': 'scrub_{0}_{1}'.format(scrub.scrub_volume.vol_name, scrub.id),
                'description': scrub.scrub_description,
                'name': 'zfs.pool.scrub',
                'args': [scrub.scrub_volume.vol_name, scrub.scrub_threshold],
                'enabled': scrub.scrub_enabled,
                'schedule': {
                    'year': '*',
                    'month': scrub.scrub_month,
                    'week': '*',
                    'day_of_week': day_of_week,
                    'day': scrub.scrub_daymonth,
                    'hour': scrub.scrub_hour,
                    'minute': scrub.scrub_minute,
                    'second': 0,
                }
            })
def forwards(self, orm):
    """Copy the 9.x LLDP service configuration into the config store."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    configstore = ConfigStore(get_datastore())
    lldp = orm['services.LLDP'].objects.all()[0]
    services = orm['services.services'].objects.filter(srv_service='lldp')
    if services.exists():
        configstore.set('service.lldp.enable', services[0].srv_enable)
    configstore.set('service.lldp.save_description', lldp.lldp_intdesc)
    # Empty optional fields are stored as None.
    configstore.set('service.lldp.country_code', lldp.lldp_country or None)
    configstore.set('service.lldp.location', lldp.lldp_location or None)
def forwards(self, orm):
    """Migrate 9.x SSH service settings and host keys into ConfigStore."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    ssh = orm['services.SSH'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='ssh')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.sshd.enable', svc.srv_enable)
    cs.set('service.sshd.port', ssh.ssh_tcpport)
    cs.set('service.sshd.permit_root_login', ssh.ssh_rootlogin)
    cs.set('service.sshd.allow_password_auth', ssh.ssh_passwordauth)
    cs.set('service.sshd.allow_port_forwarding', ssh.ssh_tcpfwd)
    cs.set('service.sshd.compression', ssh.ssh_compression)
    cs.set('service.sshd.sftp_log_level', ssh.ssh_sftp_log_level)
    cs.set('service.sshd.sftp_log_facility', ssh.ssh_sftp_log_facility)
    cs.set('service.sshd.auxiliary', ssh.ssh_options or None)
    # Host keys: private/public pair per key type; certificates are only
    # migrated when present.
    cs.set('service.sshd.keys.host.private', ssh.ssh_host_key)
    cs.set('service.sshd.keys.host.public', ssh.ssh_host_key_pub)
    cs.set('service.sshd.keys.dsa.private', ssh.ssh_host_dsa_key or None)
    cs.set('service.sshd.keys.dsa.public', ssh.ssh_host_dsa_key_pub or None)
    if ssh.ssh_host_dsa_key_cert_pub:
        cs.set('service.sshd.keys.dsa.certificate', ssh.ssh_host_dsa_key_cert_pub)
    cs.set('service.sshd.keys.ecdsa.private', ssh.ssh_host_ecdsa_key or None)
    cs.set('service.sshd.keys.ecdsa.public', ssh.ssh_host_ecdsa_key_pub or None)
    if ssh.ssh_host_ecdsa_key_cert_pub:
        cs.set('service.sshd.keys.ecdsa.certificate', ssh.ssh_host_ecdsa_key_cert_pub)
    cs.set('service.sshd.keys.ed25519.private', ssh.ssh_host_ed25519_key or None)
    cs.set('service.sshd.keys.ed25519.public', ssh.ssh_host_ed25519_key_pub or None)
    if ssh.ssh_host_ed25519_key_cert_pub:
        cs.set('service.sshd.keys.ed25519.certificate', ssh.ssh_host_ed25519_key_cert_pub)
    cs.set('service.sshd.keys.rsa.private', ssh.ssh_host_rsa_key or None)
    cs.set('service.sshd.keys.rsa.public', ssh.ssh_host_rsa_key_pub or None)
    if ssh.ssh_host_rsa_key_cert_pub:
        cs.set('service.sshd.keys.rsa.certificate', ssh.ssh_host_rsa_key_cert_pub)
def forwards(self, orm):
    """Migrate 9.x iSCSI global configuration, portals and targets."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    iscsi = orm['services.iSCSITargetGlobalConfiguration'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='iscsi')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.iscsi.enable', svc.srv_enable)
    cs.set('service.iscsi.base_name', iscsi.iscsi_basename)
    cs.set('service.iscsi.isns_servers', iscsi.iscsi_isns_servers)
    cs.set('service.iscsi.pool_space_threshold', iscsi.iscsi_pool_avail_threshold)

    # iSCSI Portals
    iscsi_portals = orm['services.iSCSITargetPortal'].objects.all()
    for p in iscsi_portals:
        ds.insert('iscsi.portals', {
            'id': 'pg{0}'.format(p.id),
            'tag': p.iscsi_target_portal_tag,
            'description': p.iscsi_target_portal_comment,
            'discovery_auth_group': 'ag{0}'.format(p.iscsi_target_portal_discoveryauthgroup),
            # Each portal may listen on several address/port pairs.
            'listen': [{'address': i.iscsi_target_portalip_ip, 'port': i.iscsi_target_portalip_port} for i in p.ips.all()]
        })

    # iSCSI Targets
    iscsi_targets = orm['services.iSCSITarget'].objects.all()
    for t in iscsi_targets:
        ds.insert('iscsi.targets', {
            'id': t.iscsi_target_name,
            'portal_group': 'default',  # XXX: Needs to pass proper portal group
            'auth_group': 'no-authentication',  # XXX: Needs to pass proper auth group
            'description': t.iscsi_target_alias,
            'extents': []
        })
def forwards(self, orm):
    """Copy the 9.x S.M.A.R.T. daemon configuration into the config store."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    configstore = ConfigStore(get_datastore())
    smartd = orm['services.SMART'].objects.all()[0]
    services = orm['services.services'].objects.filter(srv_service='smartd')
    if services.exists():
        configstore.set('service.smartd.enable', services[0].srv_enable)

    # Zero/empty temperature thresholds are stored as None.
    for key, value in (
        ('service.smartd.interval', smartd.smart_interval),
        ('service.smartd.power_mode', smartd.smart_powermode.upper()),
        ('service.smartd.temp_difference', smartd.smart_difference or None),
        ('service.smartd.temp_informational', smartd.smart_informational or None),
        ('service.smartd.temp_critical', smartd.smart_critical or None),
    ):
        configstore.set(key, value)
def forwards(self, orm):
    """Copy the 9.x TFTP service configuration into the config store."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    configstore = ConfigStore(get_datastore())
    tftp = orm['services.TFTP'].objects.all()[0]
    services = orm['services.services'].objects.filter(srv_service='tftp')
    if services.exists():
        configstore.set('service.tftp.enable', services[0].srv_enable)

    # Empty optional fields are stored as None.
    for key, value in (
        ('service.tftp.path', tftp.tftp_directory or None),
        ('service.tftp.allow_new_files', tftp.tftp_newfiles),
        ('service.tftp.port', tftp.tftp_port),
        ('service.tftp.username', tftp.tftp_username),
        ('service.tftp.umask', tftp.tftp_umask),
        ('service.tftp.auxiliary', tftp.tftp_options or None),
    ):
        configstore.set(key, value)
def forwards(self, orm):
    """Replace all datastore NTP server entries with the 9.x ones."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    datastore = get_datastore()

    # If we are migrating delete all entries in mongodb
    # because we don't know what the user changed/has in 9.3
    for entry in datastore.query('ntpservers'):
        datastore.delete('ntpservers', entry['id'])

    for server in orm['system.NTPServer'].objects.all():
        record = {
            'address': server.ntp_address,
            'burst': server.ntp_burst,
            'iburst': server.ntp_iburst,
            'prefer': server.ntp_prefer,
            'minpoll': server.ntp_minpoll,
            'maxpoll': server.ntp_maxpoll,
        }
        datastore.insert('ntpservers', record)
def forwards(self, orm):
    """Migrate 9.x rsyncd settings and rsync modules into the datastore."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    rsyncd = orm['services.Rsyncd'].objects.all()[0]
    svc = orm['services.services'].objects.filter(srv_service='rsync')
    svc = svc[0] if svc.exists() else None
    if svc:
        cs.set('service.rsyncd.enable', svc.srv_enable)
    cs.set('service.rsyncd.port', rsyncd.rsyncd_port)
    cs.set('service.rsyncd.auxiliary', rsyncd.rsyncd_auxiliary)

    for rmod in orm['services.RsyncMod'].objects.all():
        # BUG FIX: the access mode was being derived from the module
        # *path* (rsyncmod_path) instead of its mode field, so every
        # migrated module came out READWRITE.
        if rmod.rsyncmod_mode == 'ro':
            mode = 'READONLY'
        elif rmod.rsyncmod_mode == 'wo':
            mode = 'WRITEONLY'
        else:
            mode = 'READWRITE'
        ds.insert('rsyncd-module', {
            'name': rmod.rsyncmod_name,
            'description': rmod.rsyncmod_comment or None,
            'path': rmod.rsyncmod_path,
            'mode': mode,
            'max_connections': rmod.rsyncmod_maxconn or None,
            'user': rmod.rsyncmod_user,
            'group': rmod.rsyncmod_group,
            'hosts_allow': rmod.rsyncmod_hostsallow or None,
            'hosts_deny': rmod.rsyncmod_hostsdeny or None,
            'auxiliary': rmod.rsyncmod_auxiliary or None,
        })
def forwards(self, orm):
    """Migrate 9.x ZFS scrub schedules into schedulerd run entries."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for scrub in orm['storage.Scrub'].objects.all():
        day_of_week = []
        for w in scrub.scrub_dayweek.split(','):
            try:
                day_of_week.append(WEEKDAYS[int(w) - 1])
            except (ValueError, IndexError):
                # BUG FIX: narrowed from a bare 'except:'; only skip
                # malformed or out-of-range weekday entries.
                pass
        if not day_of_week:
            day_of_week = '*'
        else:
            day_of_week = ','.join(day_of_week)
        ds.insert('schedulerd.runs', {
            'id': 'scrub_{0}_{1}'.format(scrub.scrub_volume.vol_name, scrub.id),
            'description': scrub.scrub_description,
            'name': 'zfs.pool.scrub',
            'args': [scrub.scrub_volume.vol_name, scrub.scrub_threshold],
            'enabled': scrub.scrub_enabled,
            'schedule': {
                'year': '*',
                'month': scrub.scrub_month,
                'week': '*',
                'day_of_week': day_of_week,
                'day': scrub.scrub_daymonth,
                'hour': scrub.scrub_hour,
                'minute': scrub.scrub_minute,
                'second': 0,
            }
        })
def forwards(self, orm):
    """Migrate 9.x cron jobs into schedulerd 'calendar_task.command' runs."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for cron in orm['tasks.CronJob'].objects.all():
        day_of_week = []
        for w in cron.cron_dayweek.split(','):
            try:
                day_of_week.append(WEEKDAYS[int(w) - 1])
            except (ValueError, IndexError):
                # BUG FIX: narrowed from a bare 'except:'; only skip
                # malformed or out-of-range weekday entries.
                pass
        if not day_of_week:
            day_of_week = '*'
        else:
            day_of_week = ','.join(day_of_week)
        ds.insert('schedulerd.runs', {
            'id': 'cronjob_{0}_{1}'.format(cron.cron_user, cron.id),
            'description': cron.cron_description,
            'name': 'calendar_task.command',
            'args': [cron.cron_user, cron.cron_command],
            'enabled': cron.cron_enabled,
            'schedule': {
                'year': '*',
                'month': cron.cron_month,
                'week': '*',
                'day_of_week': day_of_week,
                'day': cron.cron_daymonth,
                'hour': cron.cron_hour,
                'minute': cron.cron_minute,
                'second': 0,
            }
        })
def forwards(self, orm):
    """Copy 9.x tunables into the 'tunables' datastore collection."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    datastore = get_datastore()
    # Map the 9.x lowercase type names onto the new uppercase scheme;
    # anything unrecognized is treated as an rc.conf variable.
    type_names = {'sysctl': 'SYSCTL', 'loader': 'LOADER'}

    for tunable in orm['system.Tunable'].objects.all():
        datastore.insert('tunables', {
            'var': tunable.tun_var,
            'value': tunable.tun_value,
            'type': type_names.get(tunable.tun_type, 'RC'),
            'comment': tunable.tun_comment,
            'enabled': tunable.tun_enabled,
        })
def forwards(self, orm):
    """Replace all datastore NTP server entries with the 9.x ones."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    # If we are migrating delete all entries in mongodb
    # because we don't know what the user changed/has in 9.3
    for ntp in ds.query('ntpservers'):
        ds.delete('ntpservers', ntp['id'])

    for ntp in orm['system.NTPServer'].objects.all():
        ds.insert(
            'ntpservers', {
                'address': ntp.ntp_address,
                'burst': ntp.ntp_burst,
                'iburst': ntp.ntp_iburst,
                'prefer': ntp.ntp_prefer,
                'minpoll': ntp.ntp_minpoll,
                'maxpoll': ntp.ntp_maxpoll,
            })
def forwards(self, orm):
    """Copy the most recent 9.x Email settings into the config store."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    configstore = ConfigStore(get_datastore())
    email = orm['system.Email'].objects.order_by('-id')[0]

    # Only 'ssl'/'tls' map to an encryption mode; anything else is PLAIN.
    if email.em_security in ('ssl', 'tls'):
        encryption = email.em_security.upper()
    else:
        encryption = 'PLAIN'

    for key, value in (
        ('mail.from', email.em_fromemail),
        ('mail.server', email.em_outgoingserver),
        ('mail.port', email.em_port),
        ('mail.encryption', encryption),
        ('mail.auth', email.em_smtp),
        ('mail.user', email.em_user),
        ('mail.pass', email.em_pass),
    ):
        configstore.set(key, value)
def forwards(self, orm):
    """Copy every 9.x Volume row into the 'volumes' collection, marking
    them all as unencrypted ZFS volumes."""
    # Upgrades only; fresh installs have nothing to migrate.
    if 'FREENAS_INSTALL' in os.environ:
        return

    datastore = get_datastore()
    for volume in orm['storage.Volume'].objects.all():
        record = {
            'id': volume.vol_name,
            'guid': volume.vol_guid,
            'type': 'zfs',
            'key_encrypted': False,
            'password_encrypted': False,
            'encryption': {
                'key': None,
                'hashed_password': None,
                'salt': None,
                'slot': None,
            },
            'attributes': {},
        }
        datastore.insert('volumes', record)
def forwards(self, orm):
    """Copy 9.x tunables into the 'tunables' datastore collection."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for tun in orm['system.Tunable'].objects.all():
        # Translate 9.x type names to the new uppercase scheme; anything
        # unrecognized is treated as an rc.conf variable.
        if tun.tun_type == 'sysctl':
            _type = 'SYSCTL'
        elif tun.tun_type == 'loader':
            _type = 'LOADER'
        else:
            _type = 'RC'
        ds.insert(
            'tunables', {
                'var': tun.tun_var,
                'value': tun.tun_value,
                'type': _type,
                'comment': tun.tun_comment,
                'enabled': tun.tun_enabled,
            })
def forwards(self, orm):
    """Migrate 9.x Advanced settings into ConfigStore keys and per-user
    GUI attributes on the root user."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()
    cs = ConfigStore(ds)
    # Use the most recent Advanced row.
    adv = orm['system.Advanced'].objects.order_by('-id')[0]
    cs.set('system.console.cli', adv.adv_consolemenu)
    cs.set('system.console.screensaver', adv.adv_consolescreensaver)
    cs.set('system.serial.console', adv.adv_serialconsole)
    cs.set('system.serial.port', adv.adv_serialport)
    cs.set('system.serial.speed', int(adv.adv_serialspeed))
    cs.set('service.powerd.enable', adv.adv_powerdaemon)
    cs.set('system.swapondrive', adv.adv_swapondrive)
    cs.set('system.autotune', adv.adv_autotune)
    cs.set('system.debug.kernel', adv.adv_debugkernel)
    cs.set('system.upload_crash', adv.adv_uploadcrash)
    cs.set('system.motd', adv.adv_motd)
    cs.set('system.boot_scrub_internal', adv.adv_boot_scrub)
    # Resolve the notify user name to the migrated user record, if any.
    user = ds.query('users', ('username', '=', adv.adv_periodic_notifyuser), single=True)
    if user:
        cs.set('system.periodic.notify_user', user['id'])
    # GUI preferences are kept on the root user's attributes.
    root = ds.query('users', ('uid', '=', 0), single=True)
    if root:
        root['attributes'].update({
            'gui_messages_footer': adv.adv_consolemsg,
            'gui_traceback': adv.adv_traceback,
            'gui_advancedmode': adv.adv_advancedmode,
        })
        ds.update('users', root['id'], root)
def forwards(self, orm):
    """Migrate 9.x rsync tasks into schedulerd 'rsync.copy' run entries."""
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return

    ds = get_datastore()

    for rsync in orm['tasks.Rsync'].objects.all():
        day_of_week = []
        for w in rsync.rsync_dayweek.split(','):
            try:
                day_of_week.append(WEEKDAYS[int(w) - 1])
            except (ValueError, IndexError):
                # BUG FIX: narrowed from a bare 'except:'; only skip
                # malformed or out-of-range weekday entries.
                pass
        if not day_of_week:
            day_of_week = '*'
        else:
            day_of_week = ','.join(day_of_week)

        # 9.x allowed 'user@host' in the remote host field; split it
        # apart when present, otherwise reuse the local user.
        if '@' in rsync.rsync_remotehost:
            remote_user, remote_host = rsync.rsync_remotehost.split('@', 1)
        else:
            remote_user = rsync.rsync_user
            remote_host = rsync.rsync_remotehost

        ds.insert(
            'schedulerd.runs', {
                'id': 'rsync_{0}_{1}'.format(rsync.rsync_user, rsync.id),
                'description': rsync.rsync_desc,
                'name': 'rsync.copy',
                'args': [{
                    'user': rsync.rsync_user,
                    'remote_user': remote_user,
                    'remote_host': remote_host,
                    'path': rsync.rsync_path,
                    'remote_path': rsync.rsync_remotepath,
                    'rsync_direction': rsync.rsync_direction.upper(),
                    'rsync_mode': rsync.rsync_mode.upper(),
                    'remote_ssh_port': rsync.rsync_remoteport,
                    'remote_module': rsync.rsync_remotemodule,
                    'rsync_properties': {
                        'recursive': rsync.rsync_recursive,
                        'compress': rsync.rsync_compress,
                        'times': rsync.rsync_times,
                        'archive': rsync.rsync_archive,
                        'delete': rsync.rsync_delete,
                        'preserve_permissions': rsync.rsync_preserveperm,
                        'preserve_attributes': rsync.rsync_preserveattr,
                        'delay_updates': rsync.rsync_delayupdates,
                        'extra': rsync.rsync_extra,
                    },
                    'quiet': rsync.rsync_quiet,
                }],
                'enabled': rsync.rsync_enabled,
                'schedule': {
                    'year': '*',
                    'month': rsync.rsync_month,
                    'week': '*',
                    'day_of_week': day_of_week,
                    'day': rsync.rsync_daymonth,
                    'hour': rsync.rsync_hour,
                    'minute': rsync.rsync_minute,
                    'second': 0,
                }
            })
def main(self):
    """Task executor entry point: connect to the dispatcher and run queued tasks.

    Expects exactly one CLI argument (the check-in key). Loops forever,
    pulling tasks from self.task, loading their module, running them, and
    reporting FINISHED/FAILED status back over RPC. Exits the process on
    RPC or socket failure.
    """
    if len(sys.argv) != 2:
        print("Invalid number of arguments", file=sys.stderr)
        sys.exit(errno.EINVAL)

    key = sys.argv[1]
    configure_logging(None, logging.DEBUG)

    self.datastore = get_datastore()
    self.configstore = ConfigStore(self.datastore)
    # Handshake with the dispatcher: connect, authenticate as a per-pid
    # service, expose the task proxy, and check in with the supplied key.
    self.conn = Client()
    self.conn.connect('unix:')
    self.conn.login_service('task.{0}'.format(os.getpid()))
    self.conn.enable_server()
    self.conn.call_sync('management.enable_features', ['streaming_responses'])
    self.conn.rpc.register_service_instance('taskproxy', self.service)
    self.conn.register_event_handler('task.progress', self.task_progress_handler)
    self.conn.call_sync('task.checkin', key)
    setproctitle.setproctitle('task executor (idle)')

    while True:
        try:
            # Blocks until the proxy hands us the next task descriptor.
            task = self.task.get()
            logging.root.setLevel(
                self.conn.call_sync('management.get_logging_level'))
            setproctitle.setproctitle('task executor (tid {0})'.format(
                task['id']))

            # Optional remote debugging via pydev, driven by the task itself.
            if task['debugger']:
                sys.path.append('/usr/local/lib/dispatcher/pydev')
                import pydevd
                host, port = task['debugger']
                pydevd.settrace(host, port=port, stdoutToServer=True, stderrToServer=True)

            # Load (and cache) the module that defines the task class.
            name, _ = os.path.splitext(os.path.basename(task['filename']))
            module = self.module_cache.get(task['filename'])
            if not module:
                module = load_module_from_file(name, task['filename'])
                self.module_cache[task['filename']] = module

            setproctitle.setproctitle('task executor (tid {0})'.format(
                task['id']))
            # File descriptors passed inside task args must be closed after use.
            fds = list(self.collect_fds(task['args']))

            try:
                dispatcher = DispatcherWrapper(self.conn)
                self.instance = getattr(module, task['class'])(dispatcher, self.datastore)
                self.instance.configstore = self.configstore
                self.instance.user = task['user']
                self.instance.environment = task['environment']
                self.running.set()
                self.run_task_hooks(self.instance, task, 'before')
                result = self.instance.run(*task['args'])
                self.run_task_hooks(self.instance, task, 'after', result=result)
            # BaseException so even SystemExit/KeyboardInterrupt inside a task
            # is reported as a task failure rather than killing the executor.
            except BaseException as err:
                print("Task exception: {0}".format(str(err)), file=sys.stderr)
                traceback.print_exc(file=sys.stderr)

                # Roll back if the task supports it; rollback errors are
                # logged but do not mask the original failure.
                if hasattr(self.instance, 'rollback'):
                    self.put_status('ROLLBACK')
                    try:
                        self.instance.rollback(*task['args'])
                    except BaseException as rerr:
                        print("Task exception during rollback: {0}".format(
                            str(rerr)), file=sys.stderr)
                        traceback.print_exc(file=sys.stderr)

                # Main task is already failed at this point, so ignore hook errors
                with contextlib.suppress(RpcException):
                    self.run_task_hooks(self.instance, task, 'error', error=serialize_error(err))

                self.put_status('FAILED', exception=err)
            else:
                self.put_status('FINISHED', result=result)
            finally:
                # Always release inherited fds and clear the running flag.
                self.close_fds(fds)
                self.running.clear()
        except RpcException as err:
            print("RPC failed: {0}".format(str(err)), file=sys.stderr)
            print(traceback.format_exc(), flush=True)
            sys.exit(errno.EBADMSG)
        except socket.error as err:
            print("Cannot connect to dispatcher: {0}".format(str(err)), file=sys.stderr)
            sys.exit(errno.ETIMEDOUT)

        # Tear down the debugger session started for this task, if any.
        if task['debugger']:
            import pydevd
            pydevd.stoptrace()

        setproctitle.setproctitle('task executor (idle)')
def forwards(self, orm):
    """Migrate non-builtin groups and users (plus root) into the datastore.

    Runs in two passes: groups first so that user records can reference the
    new group uuids, then users with their group memberships resolved.
    """
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return
    ds = get_datastore()
    # First ensure that no duplicate object is present between the two databases.
    # This call will raise an error if a dup is found and will not proceed.
    ensure_unique(ds, ('groups', 'gid'), orm_handle=orm,
                  orm_tuple=('account.bsdGroups', 'bsdgrp_gid'),
                  orm_query=Q(bsdgrp_builtin=False))
    ensure_unique(ds, ('users', 'uid'), orm_handle=orm,
                  orm_tuple=('account.bsdUsers', 'bsdusr_uid'),
                  orm_query=Q(bsdusr_builtin=False))
    # get all non-builtin groups
    for g in orm['account.bsdGroups'].objects.filter(bsdgrp_builtin=False):
        ds.insert(
            'groups', {
                'id': str(uuid.uuid4()),
                'gid': g.bsdgrp_gid,
                'builtin': g.bsdgrp_builtin,
                'sudo': g.bsdgrp_sudo,
                'name': g.bsdgrp_group
            })
    # Non-builtin users, plus root (uid 0) even though it is builtin.
    for u in orm['account.bsdUsers'].objects.filter(
            Q(bsdusr_builtin=False) | Q(bsdusr_uid=0)):
        # Resolve auxiliary group memberships to the new group uuids;
        # memberships pointing at groups that were not migrated are dropped.
        groups = []
        for bgm in orm['account.bsdGroupMembership'].objects.filter(
                bsdgrpmember_user=u):
            grp = ds.query('groups',
                           ('gid', '=', bgm.bsdgrpmember_group.bsdgrp_gid),
                           single=True)
            if not grp:
                continue
            groups.append(grp['id'])
        # Primary group lookup; falls back to NOGROUP_ID below if missing.
        grp = ds.query('groups', ('gid', '=', u.bsdusr_group.bsdgrp_gid),
                       single=True)
        # Reuse an existing record's uuid for this uid, otherwise mint one.
        user_uuid = ds.query('users', ('uid', '=', u.bsdusr_uid), single=True)
        user_uuid = user_uuid['id'] if user_uuid else str(uuid.uuid4())
        user = {
            'id': user_uuid,
            'uid': u.bsdusr_uid,
            'password_disabled': u.bsdusr_password_disabled,
            'email': u.bsdusr_email,
            'group': grp['id'] if grp else NOGROUP_ID,
            'home': u.bsdusr_home,
            'full_name': u.bsdusr_full_name,
            'username': u.bsdusr_username,
            'sshpubkey': bsdusr_sshpubkey(u),
            'shell': u.bsdusr_shell,
            'locked': u.bsdusr_locked,
            'unixhash': u.bsdusr_unixhash,
            'sudo': u.bsdusr_sudo,
            'groups': groups,
            'attributes': {},
            'builtin': u.bsdusr_builtin
        }
        convert_smbhash(user, u.bsdusr_smbhash)
        # upsert: root may already exist in the target datastore.
        ds.upsert('users', user_uuid, user)
    # Record that the freenas9 migration ran for these collections.
    ds.collection_record_migration('groups', 'freenas9_migration')
    ds.collection_record_migration('users', 'freenas9_migration')
def forwards(self, orm):
    """Migrate legacy CAs and certificates into 'crypto.certificates'.

    Two passes per model: insert every record first (building an
    old-id -> new-uuid map), then link each certificate to the new uuid of
    its signing CA. CAs are migrated before certificates because a
    certificate's signer is always a CA.
    """
    # Skip for install time, we only care for upgrades here
    if 'FREENAS_INSTALL' in os.environ:
        return
    ds = get_datastore()
    cs = ConfigStore(ds)

    # Legacy bitmask cert_type -> new symbolic type; anything else is a CSR.
    cert_type_map = {
        0x1: 'CA_EXISTING',
        0x2: 'CA_INTERNAL',
        0x4: 'CA_INTERMEDIATE',
        0x8: 'CERT_EXISTING',
        0x10: 'CERT_INTERNAL',
    }

    def migrate_cert(certs):
        """Insert each legacy record; return (old-id -> uuid map, signed ids)."""
        id_uuid_map = {}
        signed_ids = []
        for obj in certs:
            cert = {
                'type': cert_type_map.get(obj.cert_type, 'CERT_CSR'),
                'name': obj.cert_name,
                'certificate': obj.cert_certificate,
                'privatekey': obj.cert_privatekey,
                'csr': obj.cert_CSR,
                'key_length': obj.cert_key_length,
                'digest_algorithm': obj.cert_digest_algorithm,
                'lifetime': obj.cert_lifetime,
                'country': obj.cert_country,
                'state': obj.cert_state,
                'city': obj.cert_city,
                'organization': obj.cert_organization,
                'email': obj.cert_email,
                'common': obj.cert_common,
                'serial': obj.cert_serial,
            }
            pkey = ds.insert('crypto.certificates', cert)
            id_uuid_map[obj.id] = pkey
            if obj.cert_signedby is not None:
                signed_ids.append(obj.id)
        return id_uuid_map, signed_ids

    def migrate_signedby(model, id_uuid_map, signed_ids, ca_map):
        """Second pass: point each signed record at its signer's new uuid.

        Renamed locals (old code rebound the 'signedby' parameter and
        shadowed the builtin 'id' inside the loop) — behavior unchanged.
        """
        for old_id in signed_ids:
            cobj = model.objects.get(id=old_id)
            pkey = id_uuid_map.get(old_id)
            if pkey is None:
                continue
            cert = ds.get_by_id('crypto.certificates', pkey)
            if cobj.cert_signedby is None:
                continue
            signer_uuid = ca_map.get(cobj.cert_signedby.id)
            if signer_uuid is None:
                continue
            cert['signedby'] = signer_uuid
            ds.update('crypto.certificates', pkey, cert)

    # CAs first so their uuid map exists when linking certificates.
    id_uuid_map, signedby = migrate_cert(
        orm['system.CertificateAuthority'].objects.order_by(
            'cert_signedby'))
    migrate_signedby(orm['system.CertificateAuthority'], id_uuid_map,
                     signedby, id_uuid_map)
    cert_id_uuid_map, cert_signedby = migrate_cert(
        orm['system.Certificate'].objects.order_by('cert_signedby'))
    migrate_signedby(orm['system.Certificate'], cert_id_uuid_map,
                     cert_signedby, id_uuid_map)

    # Point nginx at the migrated GUI certificate, if one was configured.
    # (Renamed from 'uuid', which shadowed the uuid module.)
    settings = orm['system.Settings'].objects.order_by('-id')[0]
    if settings.stg_guicertificate:
        cert_uuid = cert_id_uuid_map.get(settings.stg_guicertificate.id)
        if cert_uuid:
            cs.set('service.nginx.https.certificate', cert_uuid)

    ds.collection_record_migration('crypto.certificates', 'freenas9_migration')
def forwards(self, orm):
    """Migrate legacy network configuration: global settings, hosts file,
    VLAN/LAGG interfaces, per-interface IP config, and static routes.
    """
    ds = get_datastore()
    cs = ConfigStore(ds)

    # Migrate global network configuration
    globalconf = orm.GlobalConfiguration.objects.order_by("-id")[0]
    cs.set('system.hostname', globalconf.gc_hostname + '.' + globalconf.gc_domain)
    # Empty strings become None so unset values stay unset.
    cs.set('network.gateway.ipv4', globalconf.gc_ipv4gateway or None)
    cs.set('network.gateway.ipv6', globalconf.gc_ipv6gateway or None)
    cs.set('network.http_proxy', globalconf.gc_httpproxy or None)
    # Keep only the nameservers that were actually configured.
    cs.set(
        'network.dns.addresses',
        list(
            filter(None, [
                globalconf.gc_nameserver1 or None,
                globalconf.gc_nameserver2 or None,
                globalconf.gc_nameserver3 or None,
            ])))
    cs.set('network.netwait.enable', globalconf.gc_netwait_enabled)
    cs.set('network.netwait.addresses', globalconf.gc_netwait_ip.split())
    old_hosts = []
    # Migrate hosts database: each "ip name name..." line becomes one
    # record per name, keyed by hostname.
    for line in globalconf.gc_hosts.split('\n'):
        line = line.strip()
        if not line:
            continue
        ip, *names = line.split(' ')
        old_hosts.extend([{
            'id': name,
            'addresses': [ip]
        } for name in names])
    # Refuses to proceed if any hostname already exists in the target.
    ensure_unique(ds, ('network.hosts', 'id'), old_ids=[x['id'] for x in old_hosts])
    for host in old_hosts:
        ds.insert('network.hosts', host)
    # Migrate VLAN interfaces configuration; renumbered as vlan0, vlan1, ...
    for unit, i in enumerate(orm.VLAN.objects.all()):
        ds.insert(
            'network.interfaces', {
                'id': 'vlan{0}'.format(unit),
                'name': None,
                'type': 'VLAN',
                'cloned': True,
                'enabled': True,
                'dhcp': None,
                'rtadv': False,
                'noipv6': False,
                'mtu': None,
                'media': None,
                'mediaopts': [],
                'aliases': [],
                'vlan': {
                    'parent': i.vlan_pint,
                    'tag': i.vlan_tag
                },
                'capabilities': {
                    'add': [],
                    'del': []
                }
            })
    # Migrate LAGG interfaces configuration; renumbered as lagg0, lagg1, ...
    for unit, i in enumerate(orm.LAGGInterface.objects.all()):
        ds.insert(
            'network.interfaces', {
                'id': 'lagg{0}'.format(unit),
                'name': None,
                'type': 'LAGG',
                'cloned': True,
                'enabled': True,
                'dhcp': None,
                'rtadv': False,
                'noipv6': False,
                'mtu': None,
                'media': None,
                'mediaopts': [],
                'aliases': [],
                'lagg': {
                    'protocol': LAGG_PROTOCOL_MAP[i.lagg_protocol],
                    'ports': [
                        m.int_interface for m in i.lagg_interfacemembers_set.all()
                    ]
                },
                'capabilities': {
                    'add': [],
                    'del': []
                }
            })
    # Migrate IP configuration
    autoconfigure = True
    for i in orm.Interfaces.objects.all():
        # At least one interface is configured, so disable autoconfiguration.
        autoconfigure = False
        aliases = []
        # May update a VLAN/LAGG record inserted above, keyed by device name.
        iface = ds.get_by_id('network.interfaces', i.int_interface)
        if not iface:
            iface = {
                'enabled': True,
            }
        # 'aliases' is filled in below; the dict holds a live reference.
        iface.update({
            'name': i.int_name,
            'dhcp': i.int_dhcp,
            'aliases': aliases
        })
        if i.int_ipv4address:
            aliases.append({
                'type': 'INET',
                'address': str(i.int_ipv4address),
                'netmask': int(i.int_v4netmaskbit)
            })
        if i.int_ipv6address:
            aliases.append({
                'type': 'INET6',
                'address': str(i.int_ipv6address),
                'netmask': int(i.int_v6netmaskbit)
            })
        # Secondary addresses configured as aliases on the legacy interface.
        for alias in i.alias_set.all():
            if alias.alias_v4address:
                aliases.append({
                    'type': 'INET',
                    'address': str(alias.alias_v4address),
                    'netmask': int(alias.alias_v4netmaskbit)
                })
            if alias.alias_v6address:
                aliases.append({
                    'type': 'INET6',
                    'address': str(alias.alias_v6address),
                    'netmask': int(alias.alias_v6netmaskbit)
                })
        # Legacy free-form ifconfig options: extract mtu/media/mediaopt.
        m = re.search(r'mtu (\d+)', i.int_options)
        if m:
            iface['mtu'] = int(m.group(1))
        m = re.search(r'media (\w+)', i.int_options)
        if m:
            iface['media'] = m.group(1)
        m = re.search(r'mediaopt (\w+)', i.int_options)
        if m:
            opt = m.group(1)
            if opt in MEDIAOPT_MAP:
                iface['mediaopts'] = [MEDIAOPT_MAP[opt]]
        # Try to read capabilities: a leading '-' in the options string
        # disables the capability, otherwise it is enabled.
        for k, v in CAPABILITY_MAP.items():
            if '-{0}'.format(k) in i.int_options:
                l = iface.setdefault('capabilities', {}).setdefault('del', [])
                l += v
            elif k in i.int_options:
                l = iface.setdefault('capabilities', {}).setdefault('add', [])
                l += v
        ds.upsert('network.interfaces', i.int_interface, iface)
    # If there are no interfaces, let it autoconfigure
    cs.set('network.autoconfigure', autoconfigure)
    # Migrate static routes; invalid destinations are reported and skipped.
    for i in orm.StaticRoute.objects.all():
        try:
            net = ipaddress.ip_network(i.sr_destination)
        except ValueError as e:
            print("Invalid network {0}: {1}".format(i.sr_destination, e))
            continue
        ds.insert(
            'network.routes', {
                'network': str(net.network_address),
                'netmask': net.prefixlen,
                'gateway': i.sr_gateway,
                'type': 'INET'
            })
    # Record that the freenas9 migration ran for these collections.
    ds.collection_record_migration('network.interfaces', 'freenas9_migration')
    ds.collection_record_migration('network.routes', 'freenas9_migration')
    ds.collection_record_migration('network.hosts', 'freenas9_migration')
def init_configstore(self):
    """Create the ConfigStore backed by a freshly opened datastore handle."""
    handle = datastore.get_datastore()
    self.configstore = datastore.config.ConfigStore(handle)
def __init__(self):
    """Open a datastore handle; scheduler state starts unset."""
    # Scheduler and its jobstore alias are wired up later, not here.
    self._scheduler = None
    self._jobstore_alias = None
    self.ds = get_datastore()