def info(self):
    """Return basic system information as a dict (version, hostname,
    physical memory, CPU model, load average, uptime and boot time)."""
    proc = Popen(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        stdout=subprocess.PIPE,
        shell=True,
    )
    uptime = proc.communicate()[0].strip()
    # kern.boottime is a struct timeval; the first 8 bytes are tv_sec.
    boot_secs = struct.unpack('l', sysctl.filter('kern.boottime')[0].value[:8])[0]
    return {
        'version': self.version(),
        'hostname': socket.gethostname(),
        'physmem': sysctl.filter('hw.physmem')[0].value,
        'model': sysctl.filter('hw.model')[0].value,
        'loadavg': os.getloadavg(),
        'uptime': uptime,
        'boottime': datetime.fromtimestamp(boot_secs),
    }
async def info(self):
    """Return basic system information as a dict, including the system
    serial number read via dmidecode."""
    uptime_proc = await Popen(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        stdout=subprocess.PIPE,
        shell=True,
    )
    uptime = (await uptime_proc.communicate())[0].decode().strip()

    serial_proc = await Popen(
        ['dmidecode', '-s', 'system-serial-number'],
        stdout=subprocess.PIPE,
    )
    # Empty dmidecode output is reported as None rather than "".
    serial = (await serial_proc.communicate())[0].decode().strip() or None

    # kern.boottime is a struct timeval; the first 8 bytes are tv_sec.
    boot_secs = struct.unpack('l', sysctl.filter('kern.boottime')[0].value[:8])[0]
    return {
        'version': self.version(),
        'hostname': socket.gethostname(),
        'physmem': sysctl.filter('hw.physmem')[0].value,
        'model': sysctl.filter('hw.model')[0].value,
        'loadavg': os.getloadavg(),
        'uptime': uptime,
        'system_serial': serial,
        'boottime': datetime.fromtimestamp(boot_secs),
        'datetime': datetime.now(),
    }
def _system_info(request=None):
    """Collect basic system information (hostname, platform, memory,
    date, uptime, load average and build string) for display.

    Returns a dict of display-ready strings/values.
    """
    # OS, hostname, release
    __, hostname, __ = os.uname()[0:3]
    platform = sysctl.filter('hw.model')[0].value
    physmem = '%dMB' % (sysctl.filter('hw.physmem')[0].value / 1048576, )
    # All this for a timezone, because time.asctime() doesn't add it in.
    date = time.strftime('%a %b %d %H:%M:%S %Z %Y') + '\n'
    uptime = subprocess.check_output(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        shell=True)
    loadavg = "%.2f, %.2f, %.2f" % os.getloadavg()

    freenas_build = "Unrecognized build (%s missing?)" % VERSION_FILE
    try:
        with open(VERSION_FILE) as d:
            freenas_build = d.read()
    except OSError:
        # Version file missing/unreadable: keep the fallback string.
        # (Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.)
        pass

    return {
        'hostname': hostname,
        'platform': platform,
        'physmem': physmem,
        'date': date,
        'uptime': uptime,
        'loadavg': loadavg,
        'freenas_build': freenas_build,
    }
async def info(self):
    """Return basic system information as a dict, including core count
    and the system serial number read via dmidecode."""

    async def _pipe_output(cmd, shell=False):
        # Run a command, capture stdout, decode and strip it.
        proc = await Popen(cmd, stdout=subprocess.PIPE, shell=shell)
        return (await proc.communicate())[0].decode().strip()

    uptime = await _pipe_output(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        shell=True,
    )
    serial = await _pipe_output(['dmidecode', '-s', 'system-serial-number']) or None

    # kern.boottime is a struct timeval; the first 8 bytes are tv_sec.
    boot_secs = struct.unpack('l', sysctl.filter('kern.boottime')[0].value[:8])[0]
    return {
        'version': self.version(),
        'hostname': socket.gethostname(),
        'physmem': sysctl.filter('hw.physmem')[0].value,
        'model': sysctl.filter('hw.model')[0].value,
        'cores': sysctl.filter('hw.ncpu')[0].value,
        'loadavg': os.getloadavg(),
        'uptime': uptime,
        'system_serial': serial,
        'boottime': datetime.fromtimestamp(boot_secs),
        'datetime': datetime.now(),
    }
def _system_info(request=None):
    """Collect basic system information plus the software build name,
    version and the build date derived from the system manifest.

    Returns a dict of display-ready strings/values.
    """
    # OS, hostname, release
    __, hostname, __ = os.uname()[0:3]
    platform = sysctl.filter('hw.model')[0].value
    physmem = '%dMB' % (sysctl.filter('hw.physmem')[0].value / 1048576, )
    # All this for a timezone, because time.asctime() doesn't add it in.
    date = time.strftime('%a %b %d %H:%M:%S %Z %Y') + '\n'
    uptime = subprocess.check_output(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        shell=True)
    loadavg = "%.2f, %.2f, %.2f" % os.getloadavg()

    try:
        freenas_build = '%s %s' % (get_sw_name(), get_sw_login_version())
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort fallback.
        freenas_build = "Unrecognized build"

    try:
        conf = Configuration.Configuration()
        manifest = conf.SystemManifest()
        # Manifest sequence is a UNIX timestamp of the build.
        builddate = datetime.utcfromtimestamp(int(manifest.Sequence()))
    except Exception:
        builddate = None

    return {
        'hostname': hostname,
        'platform': platform,
        'physmem': physmem,
        'date': date,
        'uptime': uptime,
        'loadavg': loadavg,
        'freenas_build': freenas_build,
        'builddate': builddate,
    }
def _system_info(request=None):
    """Collect basic system information (hostname, platform, memory,
    date, uptime, load average and build string) for display.

    Returns a dict of display-ready strings/values.
    """
    # OS, hostname, release
    __, hostname, __ = os.uname()[0:3]
    platform = sysctl.filter('hw.model')[0].value
    physmem = '%dMB' % (
        sysctl.filter('hw.physmem')[0].value / 1048576,
    )
    # All this for a timezone, because time.asctime() doesn't add it in.
    date = time.strftime('%a %b %d %H:%M:%S %Z %Y') + '\n'
    uptime = subprocess.check_output(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        shell=True
    )
    loadavg = "%.2f, %.2f, %.2f" % os.getloadavg()

    freenas_build = "Unrecognized build (%s missing?)" % VERSION_FILE
    try:
        with open(VERSION_FILE) as d:
            freenas_build = d.read()
    except OSError:
        # Version file missing/unreadable: keep the fallback string.
        # (Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.)
        pass

    return {
        'hostname': hostname,
        'platform': platform,
        'physmem': physmem,
        'date': date,
        'uptime': uptime,
        'loadavg': loadavg,
        'freenas_build': freenas_build,
    }
def get_memory_info(self, arc_size):
    """Break physical memory down into usage classes, plus swap figures.

    arc_size: current ZFS ARC size in bytes; it is carved out of the
    wired class on FreeBSD and reported as its own class on both OSes.

    Returns {"classes": {...}, "extra": {...}, "swap": {"used", "total"}}.
    """
    if osc.IS_FREEBSD:
        page_size = int(sysctl.filter("hw.pagesize")[0].value)

        # vm.stats.vm.v_*_count values arrive either as plain ints or as
        # raw bytes that must be unpacked as an unsigned int page count.
        # NOTE(review): int values are used as-is, without the page_size
        # multiplication applied to the unpacked ones — confirm those
        # OIDs already report bytes in that case.
        classes = {
            k: v if isinstance(v, int) else struct.unpack("I", v)[0] * page_size
            for k, v in [
                (k, sysctl.filter(f"vm.stats.vm.v_{k}_count")[0].value)
                for k in ["cache", "laundry", "inactive", "active", "wire", "free"]
            ]
        }
        # Whatever physmem is not accounted for above belongs to the OS.
        classes["os_reserved"] = int(sysctl.filter("hw.physmem")[0].value) - sum(classes.values())
        # ARC pages are wired; report them separately from other wired memory.
        classes["wire"] -= arc_size
        classes["arc"] = arc_size

        extra = {}

        sswap = psutil.swap_memory()
        swap = {
            "used": sswap.used,
            "total": sswap.total,
        }
    else:
        # Linux: parse /proc/meminfo lines of the form "Key:   123 kB".
        with open("/proc/meminfo") as f:
            meminfo = {
                s[0]: humanfriendly.parse_size(s[1], binary=True)
                for s in [
                    line.split(":", 1)
                    for line in f.readlines()
                ]
            }

        classes = {}
        classes["page_tables"] = meminfo["PageTables"]
        classes["swap_cache"] = meminfo["SwapCached"]
        classes["slab_cache"] = meminfo["Slab"]
        classes["cache"] = meminfo["Cached"]
        classes["buffers"] = meminfo["Buffers"]
        classes["unused"] = meminfo["MemFree"]
        classes["arc"] = arc_size
        # Remaining memory is attributed to applications.
        classes["apps"] = meminfo["MemTotal"] - sum(classes.values())

        extra = {
            "inactive": meminfo["Inactive"],
            "committed": meminfo["Committed_AS"],
            "active": meminfo["Active"],
            "vmalloc_used": meminfo["VmallocUsed"],
            "mapped": meminfo["Mapped"],
        }

        swap = {
            "used": meminfo["SwapTotal"] - meminfo["SwapFree"],
            "total": meminfo["SwapTotal"],
        }

    return {
        "classes": classes,
        "extra": extra,
        "swap": swap,
    }
async def info(self):
    """Return basic system information as a dict, including serial,
    product name, license summary and configured timezone."""
    uptime_proc = await Popen(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        stdout=subprocess.PIPE,
        shell=True,
    )
    uptime = (await uptime_proc.communicate())[0].decode().strip()

    serial_proc = await Popen(
        ['dmidecode', '-s', 'system-serial-number'],
        stdout=subprocess.PIPE,
    )
    serial = (await serial_proc.communicate())[0].decode().strip() or None

    product_proc = await Popen(
        ['dmidecode', '-s', 'system-product-name'],
        stdout=subprocess.PIPE,
    )
    product = (await product_proc.communicate())[0].decode().strip() or None

    # Collapse the license object into a small summary dict (or None).
    license = get_license()[0]
    if license:
        license = {
            "contract_type": ContractType(license.contract_type).name.upper(),
            "contract_end": license.contract_end,
        }

    settings = await self.middleware.call('datastore.config', 'system.settings')
    # kern.boottime is a struct timeval; the first 8 bytes are tv_sec.
    boot_secs = struct.unpack('l', sysctl.filter('kern.boottime')[0].value[:8])[0]
    return {
        'version': self.version(),
        'hostname': socket.gethostname(),
        'physmem': sysctl.filter('hw.physmem')[0].value,
        'model': sysctl.filter('hw.model')[0].value,
        'cores': sysctl.filter('hw.ncpu')[0].value,
        'loadavg': os.getloadavg(),
        'uptime': uptime,
        'system_serial': serial,
        'system_product': product,
        'license': license,
        'boottime': datetime.fromtimestamp(boot_secs),
        'datetime': datetime.utcnow(),
        'timezone': settings['stg_timezone'],
    }
def run(self):
    """Realtime stats loop: every 2 seconds gather memory use, per-CPU
    usage, CPU temperatures and per-interface traffic, then emit an
    ADDED event with the collected fields."""
    # Previous tick counters; usage needs two samples, so the first
    # iteration emits no 'cpu' usage figures.
    cp_time_last = None
    cp_times_last = None
    last_interface_stats = None

    while not self._cancel.is_set():
        data = {}

        # Virtual memory use
        data['virtual_memory'] = psutil.virtual_memory()._asdict()

        data['cpu'] = {}
        # Get CPU usage %
        # cp_times has values for all cores
        cp_times = sysctl.filter('kern.cp_times')[0].value
        # cp_time is the sum of all cores
        cp_time = sysctl.filter('kern.cp_time')[0].value
        if cp_times_last:
            # Get the difference of times between the last check and the current one
            # cp_time has a list with user, nice, system, interrupt and idle
            cp_diff = list(map(lambda x: x[0] - x[1], zip(cp_times, cp_times_last)))
            # 5 counters per CPU (user, nice, system, interrupt, idle).
            cp_nums = int(len(cp_times) / 5)
            for i in range(cp_nums):
                data['cpu'][i] = self.get_cpu_usages(cp_diff[i * 5:i * 5 + 5])

            cp_diff = list(map(lambda x: x[0] - x[1], zip(cp_time, cp_time_last)))
            data['cpu']['average'] = self.get_cpu_usages(cp_diff)

        cp_time_last = cp_time
        cp_times_last = cp_times

        # CPU temperature
        # dev.cpu.<i>.temperature exists per core; stop at the first gap.
        data['cpu']['temperature'] = {}
        for i in itertools.count():
            v = sysctl.filter(f'dev.cpu.{i}.temperature')
            if not v:
                break
            data['cpu']['temperature'][i] = v[0].value

        # Interface related statistics
        data['interfaces'] = {}
        retrieve_stat_keys = ['received_bytes', 'sent_bytes']
        for iface in netif.list_interfaces().values():
            # Only the link-level address carries the interface counters.
            for addr in filter(lambda addr: addr.af.name.lower() == 'link', iface.addresses):
                addr_data = addr.__getstate__(stats=True)
                data['interfaces'][iface.name] = {}
                for k in retrieve_stat_keys:
                    # '<k>_last' is the delta since the previous sample
                    # (the raw counter on the first pass).
                    data['interfaces'][iface.name].update({
                        k: addr_data['stats'][k],
                        f'{k}_last': addr_data['stats'][k] - (
                            0 if not last_interface_stats
                            else last_interface_stats.get(iface.name, {}).get(k, 0)
                        )
                    })
        last_interface_stats = data['interfaces'].copy()

        self.send_event('ADDED', fields=data)
        time.sleep(2)
async def info(self):
    """Return basic system information as a dict, including serial,
    product name, manufacturer, license and configured timezone."""

    async def _dmidecode(key):
        # Query a single DMI string; empty output becomes None.
        proc = await Popen(['dmidecode', '-s', key], stdout=subprocess.PIPE)
        return (await proc.communicate())[0].decode().strip() or None

    uptime_proc = await Popen(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        stdout=subprocess.PIPE,
        shell=True,
    )
    uptime = (await uptime_proc.communicate())[0].decode().strip()

    serial = await self._system_serial()
    product = await _dmidecode('system-product-name')
    manufacturer = await _dmidecode('system-manufacturer')

    settings = await self.middleware.call('datastore.config', 'system.settings')
    # kern.boottime is a struct timeval; the first 8 bytes are tv_sec.
    boot_secs = struct.unpack('l', sysctl.filter('kern.boottime')[0].value[:8])[0]
    return {
        'version': self.version(),
        'hostname': socket.gethostname(),
        'physmem': sysctl.filter('hw.physmem')[0].value,
        'model': sysctl.filter('hw.model')[0].value,
        'cores': sysctl.filter('hw.ncpu')[0].value,
        'loadavg': os.getloadavg(),
        'uptime': uptime,
        'uptime_seconds': time.clock_gettime(5),  # CLOCK_UPTIME = 5
        'system_serial': serial,
        'system_product': product,
        'license': await self.__get_license(),
        'boottime': datetime.fromtimestamp(boot_secs),
        'datetime': datetime.utcnow(),
        'timezone': settings['stg_timezone'],
        'system_manufacturer': manufacturer,
    }
def set_ctl_ha_peer(middleware):
    """Point the CTL HA peer sysctl at the partner node when ALUA is
    enabled; clear it otherwise."""
    # A missing sysctl OID surfaces as IndexError from the [0] lookup.
    with contextlib.suppress(IndexError):
        if not middleware.call_sync("iscsi.global.alua_enabled"):
            sysctl.filter("kern.cam.ctl.ha_peer")[0].value = ""
            return

        node = middleware.call_sync("failover.node")
        if node == "A":
            peer = "listen 169.254.10.1"
        elif node == "B":
            peer = "connect 169.254.10.1"
        else:
            # Unknown node designation: leave the sysctl untouched.
            return
        sysctl.filter("kern.cam.ctl.ha_peer")[0].value = peer
def flags(self):
    """Return a dict of Intel VT-x capability flags.

    intel_vmx: VT-x support is initialized.
    unrestricted_guest: the VT-x unrestricted-guest capability is present.
    """
    vmx = sysctl.filter('hw.vmm.vmx.initialized')
    ug = sysctl.filter('hw.vmm.vmx.cap.unrestricted_guest')
    return {
        # bool() replaces the redundant `True if ... else False` ternaries.
        'intel_vmx': bool(vmx and vmx[0].value),
        'unrestricted_guest': bool(ug and ug[0].value),
    }
async def update_timeout_value(middleware, *args):
    """Set kern.init_shutdown_timeout so enabled SHUTDOWN scripts have
    time to run, unless the user overrides the sysctl via a tunable."""
    tunable = await middleware.call(
        'tunable.query', [
            ['var', '=', 'kern.init_shutdown_timeout'],
            ['type', '=', 'SYSCTL'],
            ['enabled', '=', True]
        ]
    )
    if tunable:
        # User manages this sysctl themselves; leave it alone.
        return

    scripts = await middleware.call(
        'initshutdownscript.query', [
            ['enabled', '=', True],
            ['when', '=', 'SHUTDOWN']
        ]
    )
    # Default 120 seconds is being added to scripts timeout to ensure other
    # system related scripts can execute safely within the default timeout.
    # (Generator sum replaces the old sum(list(map(lambda ...))) chain.)
    timeout_value = 120 + sum(script['timeout'] for script in scripts)

    await middleware.run_in_thread(
        lambda: setattr(
            sysctl.filter('kern.init_shutdown_timeout')[0], 'value', timeout_value
        )
    )
async def _event_devfs(middleware, event_type, args):
    """devd CDEV event handler: keep disk state in sync when device
    nodes appear or disappear."""
    data = args['data']
    if data.get('subsystem') != 'CDEV':
        return
    cdev = data['cdev']
    kind = data['type']

    if kind == 'CREATE':
        disks = await middleware.threaded(
            lambda: sysctl.filter('kern.disks')[0].value.split()
        )
        # Device notified about is not a disk
        if cdev not in disks:
            return
        # TODO: hack so every disk is not synced independently during boot
        # This is a performance issue
        if os.path.exists('/tmp/.sync_disk_done'):
            await middleware.call('disk.sync', cdev)
            await middleware.call('disk.multipath_sync')
            try:
                with SmartAlert() as sa:
                    sa.device_delete(cdev)
            except Exception:
                pass
    elif kind == 'DESTROY':
        # Device notified about is not a disk
        if not RE_ISDISK.match(cdev):
            return
        # TODO: hack so every disk is not synced independently during boot
        # This is a performance issue
        if os.path.exists('/tmp/.sync_disk_done'):
            await middleware.call('disk.sync_all')
            await middleware.call('disk.multipath_sync')
            try:
                with SmartAlert() as sa:
                    sa.device_delete(cdev)
            except Exception:
                pass
async def _event_devfs(middleware, event_type, args):
    """devd CDEV event handler: keep disk state in sync when device
    nodes are created or destroyed."""
    data = args['data']
    if data.get('subsystem') != 'CDEV':
        return

    if data['type'] == 'CREATE':
        disks = await middleware.threaded(
            lambda: sysctl.filter('kern.disks')[0].value.split())
        # Device notified about is not a disk
        if data['cdev'] not in disks:
            return
        # TODO: hack so every disk is not synced independently during boot
        # This is a performance issue
        if os.path.exists('/tmp/.sync_disk_done'):
            await middleware.call('disk.sync', data['cdev'])
            await middleware.call('disk.multipath_sync')
            try:
                # Best effort: clear any stale S.M.A.R.T. alert for the device.
                with SmartAlert() as sa:
                    sa.device_delete(data['cdev'])
            except Exception:
                pass
    elif data['type'] == 'DESTROY':
        # Device notified about is not a disk
        if not RE_ISDISK.match(data['cdev']):
            return
        # TODO: hack so every disk is not synced independently during boot
        # This is a performance issue
        if os.path.exists('/tmp/.sync_disk_done'):
            await middleware.call('disk.sync_all')
            await middleware.call('disk.multipath_sync')
            try:
                # Best effort: clear any stale S.M.A.R.T. alert for the device.
                with SmartAlert() as sa:
                    sa.device_delete(data['cdev'])
            except Exception:
                pass
def load_disks(self):
    """Rebuild the set of disks participating in SCSI-3 persistent
    reservations and return the remote reservation keys seen on them."""
    logger.debug('Loading disks')
    self._disks.clear()

    skipped = []
    remote_keys = set()
    # TODO: blacklist disks used by dumpdev
    for name in sysctl.filter('kern.disks')[0].value.split():
        if not name.startswith(('da', 'nvd')):
            continue

        try:
            disk = Disk(self, name)
            remote_keys.update(disk.get_keys()[1])
        except (OSError, RuntimeError):
            # Disk does not support (or failed) SCSI-3 PR operations.
            skipped.append(name)
            continue
        self._disks.add(disk)

    if skipped:
        logger.debug('Disks without support for SCSI-3 PR: %s.', ' '.join(skipped))

    return remote_keys
def get_Kstat():
    """Snapshot ZFS/VM-related sysctl subtrees as a name -> Decimal map.

    Integer OIDs are taken as-is; bytearray OIDs are decoded as
    little-endian integers. Other value types are skipped.
    """
    roots = [
        "hw.pagesize",
        "hw.physmem",
        "kern.maxusers",
        "vm.kmem_map_free",
        "vm.kmem_map_size",
        "vm.kmem_size",
        "vm.kmem_size_max",
        "vm.kmem_size_min",
        "vm.kmem_size_scale",
        "vm.stats",
        "vm.swap_total",
        "vm.swap_reserved",
        "kstat.zfs",
        "vfs.zfs",
    ]

    snapshot = {}
    for root in roots:
        for oid in sysctl.filter(root):
            value = oid.value
            if isinstance(value, int):
                snapshot[oid.name] = Decimal(value)
            elif isinstance(value, bytearray):
                snapshot[oid.name] = Decimal(int.from_bytes(value, "little"))
    return snapshot
def generate_serial_loader_config(middleware):
    """Return loader.conf lines enabling the serial console, or an empty
    list when the serial console is disabled."""
    advanced = middleware.call_sync("system.advanced.config")
    if not advanced["serialconsole"]:
        return []

    if sysctl.filter("machdep.bootmethod")[0].value != "UEFI":
        console = "comconsole,vidconsole"
    else:
        # The efi console driver can do both video and serial output.
        # Don't enable it if it has a serial output, otherwise we may
        # output twice to the same serial port in loader.
        has_serial = any('Serial' in path for path in list_efi_consoles())
        if has_serial:
            # Firmware gave efi a serial port.
            # Use only comconsole to avoid duplicating output.
            console = "comconsole"
        else:
            console = "comconsole,efi"

    return [
        f'comconsole_port="{advanced["serialport"]}"',
        f'comconsole_speed="{advanced["serialspeed"]}"',
        'boot_multicons="YES"',
        'boot_serial="YES"',
        f'console="{console}"',
    ]
def __init__(self, *args, **kwargs):
    """Mirror the 'freenas' sysctl tree as nested attribute classes.

    For an OID like freenas.a.b.c this creates a dynamically built class
    attached as self.a, a nested class a.b, and finally sets attribute
    b.c to the OID's value, so values can be read as instance.a.b.c.
    """
    for oid in sysctl.filter('freenas'):
        oid_save = oid
        # Drop the leading 'freenas.' component from the OID name.
        parts = oid.name.split('.')
        fixed_parts = parts[1:len(parts)]
        oid = '.'.join(fixed_parts)
        base = type(self)
        klass = base
        parts = oid.split('.')
        for i in range(0, len(parts)):
            # First component, not seen before: create a class on self.
            if i == 0 and not getattr(self, parts[i], False):
                klass = type(parts[i], (base, ), {})
                setattr(self, parts[i], klass)
            # First component, already created by an earlier OID: reuse it.
            elif i == 0 and getattr(self, parts[i], False):
                klass = getattr(self, parts[i])
                base = klass.__bases__[0]
            # Last component: store the sysctl value itself.
            elif i == len(parts) - 1:
                setattr(klass, parts[i], oid_save.value)
            # Intermediate component that already exists: descend into it.
            elif getattr(klass, parts[i], False):
                tmp = getattr(klass, parts[i])
                base = klass
                klass = tmp
            # Intermediate component: create a nested class and descend.
            else:
                tmp = type(parts[i], (klass, ), {})
                setattr(klass, parts[i], tmp)
                base = klass
                klass = tmp
def __init__(self, *args, **kwargs):
    """Mirror the 'freenas' sysctl tree as nested attribute classes.

    For an OID like freenas.a.b.c this creates a dynamically built class
    attached as self.a, a nested class a.b, and finally sets attribute
    b.c to the OID's value, so values can be read as instance.a.b.c.
    """
    for oid in sysctl.filter('freenas'):
        oid_save = oid
        # Drop the leading 'freenas.' component from the OID name.
        parts = oid.name.split('.')
        fixed_parts = parts[1:len(parts)]
        oid = '.'.join(fixed_parts)
        base = type(self)
        klass = base
        parts = oid.split('.')
        for i in range(0, len(parts)):
            # First component, not seen before: create a class on self.
            if i == 0 and not getattr(self, parts[i], False):
                klass = type(parts[i], (base,), {})
                setattr(self, parts[i], klass)
            # First component, already created by an earlier OID: reuse it.
            elif i == 0 and getattr(self, parts[i], False):
                klass = getattr(self, parts[i])
                base = klass.__bases__[0]
            # Last component: store the sysctl value itself.
            elif i == len(parts) - 1:
                setattr(klass, parts[i], oid_save.value)
            # Intermediate component that already exists: descend into it.
            elif getattr(klass, parts[i], False):
                tmp = getattr(klass, parts[i])
                base = klass
                klass = tmp
            # Intermediate component: create a nested class and descend.
            else:
                tmp = type(parts[i], (klass,), {})
                setattr(klass, parts[i], tmp)
                base = klass
                klass = tmp
def r50_nvme_enclosures(self):
    """Build the fake NVMe enclosure (Drawer #3) on TRUENAS-R50 systems;
    returns [] on any other platform."""
    product = self.middleware.call_sync("system.info")["system_product"]
    if product != "TRUENAS-R50":
        return []

    slot_to_nvd = {}
    for nvme, nvd in self.middleware.call_sync('disk.nvme_to_nvd_map').items():
        try:
            location = sysctl.filter(f"dev.nvme.{nvme}.%location")[0].value
        except IndexError:
            # No %location OID for this device.
            continue
        m = re.search(self.RE_HANDLE, location)
        if not m:
            continue
        handle = m.group(1)
        if handle not in self.HANDLES:
            continue
        slot_to_nvd[self.HANDLES[handle]] = f"nvd{nvd}"

    return self.middleware.call_sync(
        "enclosure.fake_nvme_enclosure",
        "r50_nvme_enclosure",
        "R50 NVMe enclosure",
        "R50, Drawer #3",
        3,
        slot_to_nvd,
    )
async def __teardown_guest_vmemory(self, id):
    """Give a stopped guest's memory back to the ZFS ARC by raising
    vfs.zfs.arc_max, capped at ZFS_ARC_MAX.

    Returns True when the guest was in STOPPED state, else False.
    """
    guest_status = await self.middleware.call('vm.status', id)
    vm = await self.middleware.call('datastore.query', 'vm.vm', [('id', '=', id)])
    if not vm or vm[0].get('memory') is None:
        # No such VM row / no memory figure: nothing to give back.
        # (Previously this crashed with TypeError on None * 1024.)
        return False
    guest_memory = vm[0]['memory'] * 1024 * 1024
    max_arc = sysctl.filter('vfs.zfs.arc_max')
    resize_arc = max_arc[0].value + guest_memory

    if guest_status.get('state') == "STOPPED":
        if resize_arc <= ZFS_ARC_MAX:
            # Room below the cap: grow arc_max by the freed guest memory.
            sysctl.filter('vfs.zfs.arc_max')[0].value = max_arc[0].value + guest_memory
            self.logger.debug("===> Give back guest memory to ARC.: {}".format(guest_memory))
        elif resize_arc > ZFS_ARC_MAX and max_arc[0].value < ZFS_ARC_MAX:
            # Would overshoot: clamp arc_max back to its original limit.
            sysctl.filter('vfs.zfs.arc_max')[0].value = ZFS_ARC_MAX
            self.logger.debug("===> Enough guest memory to set ARC back to its original limit.")
        return True
    return False
async def __teardown_guest_vmemory(self, id):
    """Give a stopped guest's memory back to the ZFS ARC by raising
    vfs.zfs.arc_max, capped at ZFS_ARC_MAX.

    Returns True when the guest was in STOPPED state, else False.
    """
    guest_status = await self.middleware.call('vm.status', id)
    vm = await self.middleware.call('datastore.query', 'vm.vm', [('id', '=', id)])
    # NOTE(review): if the row lacks 'memory' this raises TypeError
    # (None * int) — confirm 'memory' is always populated upstream.
    guest_memory = vm[0].get('memory', None) * 1024 * 1024
    max_arc = sysctl.filter('vfs.zfs.arc_max')
    resize_arc = max_arc[0].value + guest_memory

    if guest_status.get('state') == 'STOPPED':
        if resize_arc <= ZFS_ARC_MAX:
            # Room below the cap: grow arc_max by the freed guest memory.
            sysctl.filter('vfs.zfs.arc_max')[0].value = max_arc[0].value + guest_memory
            self.logger.debug('===> Give back guest memory to ARC.: {}'.format(guest_memory))
        elif resize_arc > ZFS_ARC_MAX and max_arc[0].value < ZFS_ARC_MAX:
            # Would overshoot: clamp arc_max back to its original limit.
            sysctl.filter('vfs.zfs.arc_max')[0].value = ZFS_ARC_MAX
            self.logger.debug('===> Enough guest memory to set ARC back to its original limit.')
        return True
    return False
def generate_serial_loader_config(middleware):
    """Return loader.conf lines enabling the serial console, or an empty
    list when the serial console is disabled."""
    advanced = middleware.call_sync("system.advanced.config")
    mseries = middleware.call_sync("failover.hardware") == "ECHOWARP"
    if not advanced["serialconsole"]:
        return []

    if sysctl.filter("machdep.bootmethod")[0].value != "UEFI":
        console = "comconsole,vidconsole"
    else:
        # The efi console driver can do both video and serial output.
        # Don't enable it if it has a serial output, otherwise we may
        # output twice to the same serial port in loader.
        # However, enabling serial output on UEFI booted m-series devices
        # causes the boot loader screen to not show on the iKVM/HTML5 IPMI
        # website.
        has_serial = any('Serial' in path for path in list_efi_consoles())
        if has_serial and not mseries:
            # Firmware gave efi a serial port.
            # Use only comconsole to avoid duplicating output.
            console = "comconsole"
        else:
            console = "comconsole,efi"

    return [
        f'comconsole_port="{advanced["serialport"]}"',
        f'comconsole_speed="{advanced["serialspeed"]}"',
        'boot_multicons="YES"',
        'boot_serial="YES"',
        f'console="{console}"',
    ]
def run(self):
    """Warn when an mpr(4) controller's firmware major version differs
    from its driver major version."""
    alerts = []
    mpr = defaultdict(dict)
    # Group dev.mpr.<N>.<mib> major versions by controller number.
    for o in sysctl.filter('dev.mpr'):
        mibs = o.name.split('.', 3)
        if len(mibs) < 4:
            continue
        number, mib = mibs[2:4]
        try:
            major = int(o.value.split('.', 1)[0])
            mpr[number][mib] = major
        except (ValueError, AttributeError):
            # Non-numeric or non-string OID values are skipped.
            # (Was a bare `except:`, which also hid real errors.)
            continue
    for number, mibs in mpr.items():
        firmware = mibs.get('firmware_version')
        driver = mibs.get('driver_version')
        if firmware != driver:
            alerts.append(
                Alert(
                    Alert.WARN,
                    _('Firmware version %(fwversion)s does not match driver '
                      'version %(drversion)s for /dev/mpr%(mpr)s') % {
                        'fwversion': firmware,
                        'drversion': driver,
                        'mpr': number,
                    }))
    return alerts
def run(self):
    """Warn when an mps(4) controller's firmware major version differs
    from its driver major version."""
    alerts = []
    mps = defaultdict(dict)
    # Group dev.mps.<N>.<mib> major versions by controller number.
    for o in sysctl.filter('dev.mps'):
        mibs = o.name.split('.', 3)
        if len(mibs) < 4:
            continue
        number, mib = mibs[2:4]
        try:
            major = int(o.value.split('.', 1)[0])
            mps[number][mib] = major
        except (ValueError, AttributeError):
            # Non-numeric or non-string OID values are skipped.
            # (Was a bare `except:`, which also hid real errors.)
            continue
    for number, mibs in mps.items():
        firmware = mibs.get('firmware_version')
        driver = mibs.get('driver_version')
        if firmware != driver:
            alerts.append(Alert(
                Alert.WARN,
                _(
                    'Firmware version %s does not match driver version %s '
                    'for /dev/mps%s'
                ) % (firmware, driver, number)
            ))
    return alerts
def run(self):
    """Warn (with a flash suggestion) when an mpr(4) controller's
    firmware major version differs from its driver major version."""
    alerts = []
    mpr = defaultdict(dict)
    # Group dev.mpr.<N>.<mib> major versions by controller number.
    for o in sysctl.filter('dev.mpr'):
        mibs = o.name.split('.', 3)
        if len(mibs) < 4:
            continue
        number, mib = mibs[2:4]
        try:
            major = int(o.value.split('.', 1)[0])
            mpr[number][mib] = major
        except (ValueError, AttributeError):
            # Non-numeric or non-string OID values are skipped.
            # (Was a bare `except:`, which also hid real errors.)
            continue
    for number, mibs in mpr.items():
        firmware = mibs.get('firmware_version')
        driver = mibs.get('driver_version')
        if firmware != driver:
            alerts.append(Alert(
                Alert.WARN,
                _(
                    'Firmware version %(fwversion)s does not match driver '
                    'version %(drversion)s for /dev/mpr%(mpr)s. Please '
                    'flash controller to P%(drversion)s IT firmware.'
                ) % {
                    'fwversion': firmware,
                    'drversion': driver,
                    'mpr': number,
                }
            ))
    return alerts
async def validate(self, data, schema_name, verrors, old=None):
    """Validate a target/extent association: LUN ID range, LUN ID
    uniqueness per target, and extent uniqueness per target.

    Errors are accumulated into `verrors`; `old` carries the previous
    values on update so unchanged fields are not re-flagged.
    """
    if old is None:
        old = {}

    lunid = data['lunid']
    old_lunid = old.get('lunid')
    target = data['target']
    old_target = old.get('target')
    extent = data['extent']

    lun_map_size = sysctl.filter('kern.cam.ctl.lun_map_size')[0].value
    if lunid < 0 or lunid > lun_map_size - 1:
        # Valid range is 0 .. lun_map_size - 1; the previous message said
        # "lower than lun_map_size - 1", off by one from this check.
        verrors.add(
            f'{schema_name}.lunid',
            f'LUN ID must be a positive integer and lower than {lun_map_size}'
        )

    if lunid and target:
        filters = [('lunid', '=', lunid), ('target', '=', target)]
        result = await self.query(filters)
        if old_lunid != lunid and result:
            verrors.add(
                f'{schema_name}.lunid',
                'LUN ID is already being used for this target.'
            )

    if target and extent:
        filters = [('target', '=', target), ('extent', '=', extent)]
        result = await self.query(filters)
        if old_target != target and result:
            verrors.add(
                f'{schema_name}.target',
                'Extent is already in this target.'
            )
def flags(self):
    """Returns a dictionary with CPU flags for bhyve.

    intel_vmx / unrestricted_guest: Intel VT-x capabilities.
    amd_rvi / amd_asids: AMD SVM capabilities.
    """
    vmx = sysctl.filter('hw.vmm.vmx.initialized')
    ug = sysctl.filter('hw.vmm.vmx.cap.unrestricted_guest')
    rvi = sysctl.filter('hw.vmm.svm.features')
    asids = sysctl.filter('hw.vmm.svm.num_asids')
    return {
        # bool() replaces the redundant `True if ... else False` ternaries.
        'intel_vmx': bool(vmx and vmx[0].value),
        'unrestricted_guest': bool(ug and ug[0].value),
        'amd_rvi': bool(rvi and rvi[0].value != 0),
        'amd_asids': bool(asids and asids[0].value != 0),
    }
def sysctl(self, name):
    """ Tiny wrapper for sysctl module for compatibility """
    matches = sysctl.filter(str(name))
    if not matches:
        # No OID by that name.
        raise ValueError(name)
    return matches[0].value
def clean_iscsi_lunid(self):
    """Validate the LUN ID form field against kern.cam.ctl.lun_map_size.

    Returns the cleaned LUN ID, or None when the field was left empty.
    """
    lunid = self.cleaned_data.get('iscsi_lunid')
    if lunid is None:
        # Field left empty: nothing to range-check. (Previously this
        # crashed with TypeError comparing None < 0.)
        return None
    lun_map_size = sysctl.filter('kern.cam.ctl.lun_map_size')[0].value
    if lunid < 0 or lunid > lun_map_size - 1:
        # Valid range is 0 .. lun_map_size - 1; the previous message said
        # "lower than lun_map_size - 1", off by one from this check.
        raise forms.ValidationError(
            _('LUN ID must be a positive integer and lower than %d') % (
                lun_map_size))
    return lunid
async def info(self):
    """ Returns basic system information. """
    buildtime = sw_buildtime()
    if buildtime:
        # NB: the original had a stray trailing comma here, which turned
        # buildtime into a 1-tuple instead of a datetime.
        buildtime = datetime.fromtimestamp(int(buildtime))

    uptime = (await (await Popen(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        stdout=subprocess.PIPE,
        shell=True,
    )).communicate())[0].decode().strip()

    serial = await self._system_serial()

    # Empty dmidecode output is reported as None rather than "".
    product = (await (await Popen(
        ['dmidecode', '-s', 'system-product-name'],
        stdout=subprocess.PIPE,
    )).communicate())[0].decode().strip() or None

    manufacturer = (await (await Popen(
        ['dmidecode', '-s', 'system-manufacturer'],
        stdout=subprocess.PIPE,
    )).communicate())[0].decode().strip() or None

    return {
        'version': self.version(),
        'buildtime': buildtime,
        'hostname': socket.gethostname(),
        'physmem': sysctl.filter('hw.physmem')[0].value,
        'model': sysctl.filter('hw.model')[0].value,
        'cores': sysctl.filter('hw.ncpu')[0].value,
        'loadavg': os.getloadavg(),
        'uptime': uptime,
        'uptime_seconds': time.clock_gettime(5),  # CLOCK_UPTIME = 5
        'system_serial': serial,
        'system_product': product,
        'license': await self.__get_license(),
        # kern.boottime is a struct timeval; first 8 bytes are tv_sec.
        'boottime': datetime.fromtimestamp(
            struct.unpack('l', sysctl.filter('kern.boottime')[0].value[:8])[0]
        ),
        'datetime': datetime.utcnow(),
        'timezone': (await self.middleware.call('datastore.config', 'system.settings'))['stg_timezone'],
        'system_manufacturer': manufacturer,
    }
def set_ctl_ha_peer(middleware):
    """Configure the CTL HA peer address for ALUA, or clear it when ALUA
    is disabled."""
    # A missing sysctl OID surfaces as IndexError from the [0] lookup.
    with contextlib.suppress(IndexError):
        if not middleware.call_sync("iscsi.global.alua_enabled"):
            sysctl.filter("kern.cam.ctl.ha_peer")[0].value = ""
            return

        node = middleware.call_sync("failover.node")
        # 999 is the port used by ALUA on the heartbeat interface
        # on TrueNAS HA systems. Because of this, we set
        # net.inet.ip.portrange.lowfirst=998 to ensure local
        # websocket connections do not have the opportunity
        # to interfere.
        sysctl.filter("net.inet.ip.portrange.lowfirst")[0].value = 998
        if node == "A":
            peer = "listen 169.254.10.1"
        elif node == "B":
            peer = "connect 169.254.10.1"
        else:
            # Unknown node designation: leave the peer sysctl untouched.
            return
        sysctl.filter("kern.cam.ctl.ha_peer")[0].value = peer
def check_sync(self):
    """Poll each NVDIMM's health sysctls and collect resulting alerts.

    Iterates dev.nvdimm.<i>.* until the first missing device index.
    """
    alerts = []
    index = 0
    while True:
        prefix = f"dev.nvdimm.{index}"
        try:
            critical_health = sysctl.filter(f"{prefix}.critical_health")[0].value
            nvdimm_health = sysctl.filter(f"{prefix}.nvdimm_health")[0].value
            es_health = sysctl.filter(f"{prefix}.es_health")[0].value
        except IndexError:
            # No OID for this index: we've walked past the last NVDIMM.
            return alerts
        alerts.extend(
            produce_nvdimm_alerts(index, critical_health, nvdimm_health, es_health)
        )
        index += 1
def ensure_firewall_enabled(self) -> None:
    """Raise FirewallDisabled unless every required net.* sysctl holds
    its expected value."""
    requirements = self._required_sysctl_properties
    for item in sysctl.filter("net"):
        # Direct dict membership replaces the old list(keys()) copy,
        # which was an O(n) scan per probed OID.
        if item.name not in requirements:
            continue
        if item.value != requirements[item.name]:
            state = ("en" if (item.value == 0) else "dis") + "abled"
            raise iocage.lib.errors.FirewallDisabled(
                hint=f"sysctl {item.name} is not {state}",
                logger=self.logger
            )
def parse_cpu_stats():
    """Return {cpu_index: [user, nice, system, interrupt, idle]} parsed
    from kern.cp_times, which holds 5 counters per CPU."""
    cpu_times = sysctl.filter('kern.cp_times')[0].value
    # Integer division: on Python 3 the original `len(...)/5` produced a
    # float CPU count and relied on a manual while-loop float comparison.
    return {
        cpu: cpu_times[cpu * 5:(cpu + 1) * 5]
        for cpu in range(len(cpu_times) // 5)
    }
def configure_resilver_priority(self):
    """
    Configure resilver priority based on user selected off-peak hours.

    Raises resilver priority (lower delay, higher min time, no idle
    throttle) when the current time falls inside the configured window.
    """
    resilver = self.middleware.call_sync('datastore.config', 'storage.resilver')

    if not resilver['enabled'] or not resilver['weekday']:
        return

    higher_prio = False
    # Materialize as a list: the original `map(...)` generator was
    # consumed by the first `in` test, so the second membership test
    # (lastweekday) saw a partially-exhausted iterator on Python 3.
    weekdays = [int(x) for x in resilver['weekday'].split(',')]
    now = datetime.now()
    now_t = now.time()
    # end overlaps the day
    if resilver['begin'] > resilver['end']:
        if now.isoweekday() in weekdays and now_t >= resilver['begin']:
            higher_prio = True
        else:
            # Window started yesterday; check yesterday's weekday.
            lastweekday = now.isoweekday() - 1
            if lastweekday == 0:
                lastweekday = 7
            if lastweekday in weekdays and now_t < resilver['end']:
                higher_prio = True
    # end does not overlap the day
    else:
        if now.isoweekday() in weekdays and now_t >= resilver[
            'begin'
        ] and now_t < resilver['end']:
            higher_prio = True

    if higher_prio:
        resilver_delay = 0
        resilver_min_time_ms = 9000
        scan_idle = 0
    else:
        resilver_delay = 2
        resilver_min_time_ms = 3000
        scan_idle = 50

    sysctl.filter('vfs.zfs.resilver_delay')[0].value = resilver_delay
    sysctl.filter(
        'vfs.zfs.resilver_min_time_ms')[0].value = resilver_min_time_ms
    sysctl.filter('vfs.zfs.scan_idle')[0].value = scan_idle
def m50_plx_enclosures(self):
    """Build the fake rear NVMe U.2 enclosure for M50/M60 systems.

    Walks each nvme device up the device tree (nvme -> pci -> pcib) and,
    when the parent bridge is the PLX switch (vendor 0x10b5, device
    0x8717), maps that bridge's physical slot number to the nvd device
    behind it. Returns [] on other platforms.
    """
    system_product = self.middleware.call_sync(
        "system.info")["system_product"]
    if system_product is None or not ("TRUENAS-M50" in system_product or "TRUENAS-M60" in system_product):
        return []

    nvme_to_nvd = self.middleware.call_sync('disk.nvme_to_nvd_map')

    slot_to_nvd = {}
    for nvme, nvd in nvme_to_nvd.items():
        try:
            # nvme device -> parent pci device.
            pci = sysctl.filter(f"dev.nvme.{nvme}.%parent")[0].value
            m = re.match(self.RE_PCI, pci)
            if not m:
                continue
            # pci device -> parent pci bridge.
            pcib = sysctl.filter(f"dev.pci.{m.group(1)}.%parent")[0].value
            m = re.match(self.RE_PCIB, pcib)
            if not m:
                continue
            # Only bays behind the PLX PEX 8717 switch qualify.
            pnpinfo = sysctl.filter(
                f"dev.pcib.{m.group(1)}.%pnpinfo")[0].value
            if "vendor=0x10b5 device=0x8717" not in pnpinfo:
                continue
            location = sysctl.filter(
                f"dev.pcib.{m.group(1)}.%location")[0].value
            m = re.match(self.RE_SLOT, location)
            if not m:
                continue
            slot = int(m.group(1))
        except IndexError:
            # A missing sysctl OID anywhere along the chain: skip device.
            continue
        slot_to_nvd[slot] = f"nvd{nvd}"

    return self.middleware.call_sync("enclosure.fake_nvme_enclosure",
                                     "m50_plx_enclosure",
                                     "Rear NVME U.2 Hotswap Bays",
                                     "M50/60 Series",
                                     4,
                                     slot_to_nvd)
async def _start_nfs(self, **kwargs):
    """Start the NFS service stack in dependency order, applying the
    NFSv4 / v3-owner sysctl knobs from the datastore configuration."""
    nfs = await self.middleware.call('datastore.config', 'services.nfs')
    await self.middleware.call("etc.generate", "nfsd")
    await self._service("rpcbind", "start", quiet=True, **kwargs)
    await self._service("gssd", "start", quiet=True, **kwargs)
    # Workaround to work with "onetime", since the rc scripts depend on rc flags.
    if nfs['nfs_srv_v4']:
        sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 4
        if nfs['nfs_srv_v4_v3owner']:
            # Per RFC7530, sending NFSv3 style UID/GIDs across the wire is now allowed
            # You must have both of these sysctl's set to allow the desired functionality
            sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 1
            sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 1
            # NOTE(review): v3owner mode appears mutually exclusive with
            # nfsuserd (it is force-stopped here) — confirm.
            await self._service("nfsuserd", "stop", force=True, **kwargs)
        else:
            sysctl.filter('vfs.nfsd.enable_stringtouid')[0].value = 0
            sysctl.filter('vfs.nfs.enable_uidtostring')[0].value = 0
            await self._service("nfsuserd", "start", quiet=True, **kwargs)
    else:
        # v4 disabled: cap the served NFS protocol version at 3.
        sysctl.filter('vfs.nfsd.server_max_nfsvers')[0].value = 3
        if nfs['nfs_srv_16']:
            await self._service("nfsuserd", "start", quiet=True, **kwargs)
    await self._service("mountd", "start", quiet=True, **kwargs)
    await self._service("nfsd", "start", quiet=True, **kwargs)
    await self._service("statd", "start", quiet=True, **kwargs)
    await self._service("lockd", "start", quiet=True, **kwargs)
def configure_resilver_priority(self):
    """
    Configure resilver priority based on user selected off-peak hours.

    Raises resilver priority (lower delay, higher min time, no idle
    throttle) when the current time falls inside the configured window.
    """
    resilver = self.middleware.call_sync('datastore.config', 'storage.resilver')

    if not resilver['enabled'] or not resilver['weekday']:
        return

    higher_prio = False
    # Materialize as a list: the original `map(...)` generator was
    # consumed by the first `in` test, so the second membership test
    # (lastweekday) saw a partially-exhausted iterator on Python 3.
    weekdays = [int(x) for x in resilver['weekday'].split(',')]
    now = datetime.now()
    now_t = now.time()
    # end overlaps the day
    if resilver['begin'] > resilver['end']:
        if now.isoweekday() in weekdays and now_t >= resilver['begin']:
            higher_prio = True
        else:
            # Window started yesterday; check yesterday's weekday.
            lastweekday = now.isoweekday() - 1
            if lastweekday == 0:
                lastweekday = 7
            if lastweekday in weekdays and now_t < resilver['end']:
                higher_prio = True
    # end does not overlap the day
    else:
        if now.isoweekday() in weekdays and now_t >= resilver['begin'] and now_t < resilver['end']:
            higher_prio = True

    if higher_prio:
        resilver_delay = 0
        resilver_min_time_ms = 9000
        scan_idle = 0
    else:
        resilver_delay = 2
        resilver_min_time_ms = 3000
        scan_idle = 50

    sysctl.filter('vfs.zfs.resilver_delay')[0].value = resilver_delay
    sysctl.filter('vfs.zfs.resilver_min_time_ms')[0].value = resilver_min_time_ms
    sysctl.filter('vfs.zfs.scan_idle')[0].value = scan_idle
def run(self):
    """Warn when mps(4) firmware/driver versions mismatch, allowing the
    Broadcom-sanctioned P20-firmware / v21-driver combination."""
    alerts = []
    mps = defaultdict(dict)
    # Group dev.mps.<N>.<mib> major versions by controller number.
    for o in sysctl.filter('dev.mps'):
        mibs = o.name.split('.', 3)
        if len(mibs) < 4:
            continue
        number, mib = mibs[2:4]
        try:
            major = int(o.value.split('.', 1)[0])
            mps[number][mib] = major
        except (ValueError, AttributeError):
            # Non-numeric or non-string OID values are skipped.
            # (Was a bare `except:`, which also hid real errors.)
            continue
    for number, mibs in mps.items():
        firmware = mibs.get('firmware_version')
        driver = mibs.get('driver_version')
        if firmware is None or driver is None:
            # Missing either OID: int(None) previously raised an uncaught
            # TypeError here.
            continue
        try:
            # Broadcom added a new one for us, an allowed combo is p20 firmware and v21 driver
            # https://bugs.freenas.org/issues/16649
            if ((int(firmware) != int(driver)) and not
                    (int(firmware) == 20 and int(driver) == 21)):
                alerts.append(Alert(
                    Alert.WARN,
                    _(
                        'Firmware version %(fwversion)s does not match driver '
                        'version %(drversion)s for /dev/mps%(mps)s. Please '
                        'flash controller to P%(drversion)s IT firmware.'
                    ) % {
                        'fwversion': firmware,
                        'drversion': driver,
                        'mps': number,
                    }
                ))
        except ValueError:
            # cast returned Cthulhu
            # This shouldn't ever happen but as a fallback try the old method
            if ((firmware != driver) and not
                    (firmware.startswith("20") and driver.startswith("21"))):
                alerts.append(Alert(
                    Alert.WARN,
                    _(
                        'Firmware version %(fwversion)s does not match driver '
                        'version %(drversion)s for /dev/mps%(mps)s'
                    ) % {
                        'fwversion': firmware,
                        'drversion': driver,
                        'mps': number,
                    }
                ))
    return alerts
def run(self):
    """
    Periodically (every 2 seconds) gather memory, per-core CPU usage and
    CPU temperatures and emit them as an 'ADDED' event until cancelled.
    """
    cp_time_last = None
    cp_times_last = None

    while not self._cancel.is_set():
        data = {}

        # Virtual memory use
        data['virtual_memory'] = psutil.virtual_memory()._asdict()

        data['cpu'] = {}
        # Get CPU usage %
        # cp_times has values for all cores
        cp_times = sysctl.filter('kern.cp_times')[0].value
        # cp_time is the sum of all cores
        cp_time = sysctl.filter('kern.cp_time')[0].value
        if cp_times_last:
            # Get the difference of times between the last check and the current one
            # cp_time has a list with user, nice, system, interrupt and idle
            cp_diff = list(map(lambda x: x[0] - x[1], zip(cp_times, cp_times_last)))
            # Each core contributes 5 counters (user/nice/system/interrupt/idle).
            cp_nums = int(len(cp_times) / 5)
            for i in range(cp_nums):
                data['cpu'][i] = self.get_cpu_usages(cp_diff[i * 5:i * 5 + 5])

            cp_diff = list(map(lambda x: x[0] - x[1], zip(cp_time, cp_time_last)))
            data['cpu']['average'] = self.get_cpu_usages(cp_diff)

        # Remember this sample so the next iteration can compute deltas.
        cp_time_last = cp_time
        cp_times_last = cp_times

        # CPU temperature: probe dev.cpu.N.temperature until a core is missing.
        data['cpu']['temperature'] = {}
        for i in itertools.count():
            v = sysctl.filter(f'dev.cpu.{i}.temperature')
            if not v:
                break
            data['cpu']['temperature'][i] = v[0].value

        self.send_event('ADDED', fields=data)
        time.sleep(2)
def __init__(self, interval):
    """
    Prepare the temperature-polling daemon thread.

    Args:
        interval: seconds to sleep between polls in run().
    """
    super().__init__()
    self.daemon = True
    self.interval = interval
    self.temperatures = []
    # Best effort: a failure here leaves numcpu at 0, which makes run()
    # a no-op instead of crashing the thread.
    try:
        cpu_count = int(sysctl.filter("hw.ncpu")[0].value)
    except Exception as e:
        print(f"Failed to get CPU count: {e!r}")
        cpu_count = 0
    self.numcpu = cpu_count
def run(self):
    """
    Periodically emit an 'ADDED' event with overall CPU percent, memory
    stats and cached pool statuses until cancelled.

    The polling interval comes from self.arg (default 10s); intervals
    below 5 seconds are rejected to avoid hammering the system.
    """
    try:
        if self.arg:
            delay = int(self.arg)
        else:
            delay = 10
    except ValueError:
        # Non-numeric argument; nothing sensible to do.
        return

    # Interval too short; refuse to poll this aggressively.
    if delay < 5:
        return

    cp_time = sysctl.filter('kern.cp_time')[0].value
    cp_old = cp_time

    while not self._cancel.is_set():
        time.sleep(delay)

        cp_time = sysctl.filter('kern.cp_time')[0].value
        cp_diff = list(map(lambda x: x[0] - x[1], zip(cp_time, cp_old)))
        cp_old = cp_time

        # Guard against a zero total delta (identical consecutive samples)
        # which would otherwise raise ZeroDivisionError and kill the thread.
        total = sum(cp_diff)
        cpu_percent = round((sum(cp_diff[:3]) / total) * 100, 2) if total else 0.0

        pools = self.middleware.call_sync(
            'cache.get_or_put', CACHE_POOLS_STATUSES, 1800, self.pools_statuses,
        )

        self.send_event('ADDED', fields={
            'cpu_percent': cpu_percent,
            'memory': psutil.virtual_memory()._asdict(),
            'pools': pools,
            'update': self._check_update,
        })
async def __set_guest_vmemory(self, memory):
    """
    Try to make room for a guest by shrinking the ZFS ARC maximum.

    Args:
        memory (int): guest memory request in MiB.

    Returns:
        bool: True if the guests (including this request) fit within the
        throttled user memory (ARC possibly shrunk as a side effect),
        False otherwise.
    """
    usermem = sysctl.filter('hw.usermem')
    max_arc = sysctl.filter('vfs.zfs.arc_max')
    guest_mem_used = await self.get_vmemory_in_use()

    # Convert the request from MiB to bytes.
    memory = memory * 1024 * 1024

    # Keep at least 35% of memory from initial arc_max.
    # NOTE(review): this expression equals 0.35 * hw.usermem, i.e. the
    # baseline is usermem, not arc_max as the comment above suggests --
    # confirm the intended calculation.
    throttled_arc_max = int(usermem[0].value * 1.35) - usermem[0].value

    # Get the user memory and keep space for ARC.
    throttled_user_mem = int(usermem[0].value - throttled_arc_max)

    # Potential memory used by guests (reserved + running-not-persistent).
    memory_used = guest_mem_used['RPRD'] + guest_mem_used['RNP']

    vms_memory = memory_used + memory
    if vms_memory <= throttled_user_mem:
        if max_arc[0].value > throttled_arc_max:
            # Only shrink if the result would remain positive.
            if max(max_arc[0].value - memory, 0) != 0:
                self.logger.info('===> Setting ARC FROM: {} TO: {}'.format(max_arc[0].value, max_arc[0].value - memory))
                sysctl.filter('vfs.zfs.arc_max')[0].value = max_arc[0].value - memory
        return True
    else:
        return False
def identify_hypervisor(self):
    """
    Identify Hypervisors that might work nested with bhyve.

    Returns:
            bool: True if compatible otherwise False.
    """
    known_vendors = ('VMwareVMware', 'Microsoft Hv', 'KVMKVMKVM', 'bhyve bhyve')
    vendor = sysctl.filter('hw.hv_vendor')[0].value.strip()
    return vendor in known_vendors
async def __event_system_ready(middleware, event_type, args):
    """
    Method called when system is ready, supposed to start VMs
    flagged that way.

    Also records the boot-time ARC maximum in the module-level
    ZFS_ARC_MAX so it can be restored later.
    """
    global ZFS_ARC_MAX

    if args['id'] != 'ready':
        return

    ZFS_ARC_MAX = sysctl.filter('vfs.zfs.arc_max')[0].value

    for vm in await middleware.call('vm.query', [('autostart', '=', True)]):
        await middleware.call('vm.start', vm['id'])
def _system_info(request=None):
    """
    Gather basic system information for display.

    Args:
        request: unused; kept for view-function call compatibility.

    Returns:
        dict with keys hostname, platform, physmem, date, uptime,
        loadavg, freenas_build and builddate.
    """
    # OS, hostname, release
    __, hostname, __ = os.uname()[0:3]
    platform = sysctl.filter('hw.model')[0].value
    physmem = '%dMB' % (
        sysctl.filter('hw.physmem')[0].value / 1048576,
    )
    # All this for a timezone, because time.asctime() doesn't add it in.
    date = time.strftime('%a %b %d %H:%M:%S %Z %Y') + '\n'
    uptime = subprocess.check_output(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        shell=True
    )
    loadavg = "%.2f, %.2f, %.2f" % os.getloadavg()

    # Best-effort lookups below: narrowed from bare `except:` so that
    # SystemExit/KeyboardInterrupt are no longer silently swallowed.
    try:
        freenas_build = '%s %s' % (get_sw_name(), get_sw_login_version())
    except Exception:
        freenas_build = "Unrecognized build"

    try:
        conf = Configuration.Configuration()
        manifest = conf.SystemManifest()
        builddate = datetime.utcfromtimestamp(int(manifest.Sequence()))
    except Exception:
        # Manifest missing or unreadable; build date is optional.
        builddate = None

    return {
        'hostname': hostname,
        'platform': platform,
        'physmem': physmem,
        'date': date,
        'uptime': uptime,
        'loadavg': loadavg,
        'freenas_build': freenas_build,
        'builddate': builddate,
    }
def run(self):
    """
    Compare mps(4) controller firmware major versions against the driver
    major version and return a WARN alert for each mismatch.

    Returns:
        list: Alert objects, one per mismatched /dev/mpsN device.
    """
    alerts = []
    mps = defaultdict(dict)
    # Collect dev.mps.<unit>.<mib> entries, keeping only the integer
    # major component of dotted version strings.
    for o in sysctl.filter('dev.mps'):
        mibs = o.name.split('.', 3)
        if len(mibs) < 4:
            continue
        number, mib = mibs[2:4]
        try:
            major = int(o.value.split('.', 1)[0])
            mps[number][mib] = major
        except (ValueError, AttributeError):
            # Value was not a dotted version string; skip this mib.
            continue
    for number, mibs in mps.items():
        firmware = mibs.get('firmware_version')
        driver = mibs.get('driver_version')
        if firmware is None or driver is None:
            # One of the two versions is not exposed; int(None) below would
            # raise TypeError (uncaught), so skip the comparison entirely.
            continue
        try:
            if int(firmware) != int(driver):
                alerts.append(Alert(
                    Alert.WARN,
                    _(
                        'Firmware version %(fwversion)s does not match driver '
                        'version %(drversion)s for /dev/mps%(mps)s'
                    ) % {
                        'fwversion': firmware,
                        'drversion': driver,
                        'mps': number,
                    }
                ))
        except ValueError:  # cast returned cthulu
            # This shouldn't ever happen but as a fallback try the old method
            if firmware != driver:
                alerts.append(Alert(
                    Alert.WARN,
                    _(
                        'Firmware version %(fwversion)s does not match driver '
                        'version %(drversion)s for /dev/mps%(mps)s'
                    ) % {
                        'fwversion': firmware,
                        'drversion': driver,
                        'mps': number,
                    }
                ))
    return alerts
def run(self): alerts = [] mps = defaultdict(dict) for o in sysctl.filter("dev.mps"): mibs = o.name.split(".", 3) if len(mibs) < 4: continue number, mib = mibs[2:4] try: major = int(o.value.split(".", 1)[0]) mps[number][mib] = major except: continue for number, mibs in mps.items(): firmware = mibs.get("firmware_version") driver = mibs.get("driver_version") try: if int(firmware) != int(driver): alerts.append( Alert( Alert.WARN, _( "Firmware version %(fwversion)s does not match driver " "version %(drversion)s for /dev/mps%(mps)s. Please " "flash controller to P%(drversion)s IT firmware." ) % {"fwversion": firmware, "drversion": driver, "mps": number}, ) ) except ValueError: # cast returned cthulu # This shouldn't ever happen but as a fallback try the old method if firmware != driver: alerts.append( Alert( Alert.WARN, _( "Firmware version %(fwversion)s does not match driver " "version %(drversion)s for /dev/mps%(mps)s" ) % {"fwversion": firmware, "drversion": driver, "mps": number}, ) ) return alerts
def test_sysctl_writable(self):
    """
    Every writable sysctl reported by the library must also be listed
    by `/sbin/sysctl -WNa`.
    """
    writable = sysctl.filter(writable=True)
    expected = [line for line in self.command("/sbin/sysctl -WNa").split('\n') if line]
    missing = [ctl.name for ctl in writable if ctl.name not in expected]
    print(missing)
    assert len(missing) == 0
async def validate(self, data, schema_name, verrors, old=None):
    """
    Validate a target/extent association, auto-assigning a LUN ID when
    none is supplied and collecting errors into `verrors`.

    Args:
        data: new association fields (target, extent, optional lunid).
        schema_name: prefix for verrors field names.
        verrors: validation-errors accumulator.
        old: previous values when editing (None on create).
    """
    if old is None:
        old = {}

    old_lunid = old.get('lunid')
    target = data['target']
    old_target = old.get('target')
    extent = data['extent']
    if 'lunid' not in data:
        # Pick the lowest free LUN ID for this target.
        lunids = [
            o['lunid'] for o in await self.query(
                [('target', '=', target)], {'order_by': ['lunid']}
            )
        ]
        if not lunids:
            lunid = 0
        else:
            diff = sorted(set(range(0, lunids[-1] + 1)).difference(lunids))
            lunid = diff[0] if diff else max(lunids) + 1

        data['lunid'] = lunid
    else:
        lunid = data['lunid']

    # CTL limits a target to lun_map_size LUNs (valid IDs 0..size-1).
    lun_map_size = sysctl.filter('kern.cam.ctl.lun_map_size')[0].value

    if lunid < 0 or lunid > lun_map_size - 1:
        verrors.add(
            f'{schema_name}.lunid',
            # Valid IDs are strictly below lun_map_size; the previous
            # message was off by one ("lower than {lun_map_size - 1}").
            f'LUN ID must be a positive integer and lower than {lun_map_size}'
        )

    if old_lunid != lunid and await self.query([
        ('lunid', '=', lunid), ('target', '=', target)
    ]):
        verrors.add(
            f'{schema_name}.lunid',
            'LUN ID is already being used for this target.'
        )

    if old_target != target and await self.query([
        ('target', '=', target), ('extent', '=', extent)]
    ):
        verrors.add(
            f'{schema_name}.target',
            'Extent is already in this target.'
        )
def run(self):
    """
    Poll per-core CPU temperatures forever, refreshing self.temperatures
    every self.interval seconds. Exits immediately if no CPUs were
    detected at init time.
    """
    if not self.numcpu:
        return 0
    while True:
        readings = []
        try:
            for i in range(self.numcpu):
                raw = int(sysctl.filter(f"dev.cpu.{i}.temperature")[0].value)
                # Value appears to be deci-Kelvin; (raw - 2732) * 100 keeps
                # the original scaling -- presumably milli-degrees Celsius.
                readings.append((raw - 2732) * 100)
        except Exception as e:
            # A single failed read invalidates the whole sample set.
            print(f"Failed to get CPU temperature: {e!r}")
            readings = []
        self.temperatures = readings
        time.sleep(self.interval)
def generate_serial_loader_config(middleware):
    """
    Build loader.conf lines enabling the serial console when the user
    has it turned on in the advanced settings.

    Returns:
        list[str]: loader.conf lines, or [] when serial console is off.
    """
    advanced = middleware.call_sync("system.advanced.config")
    if not advanced["serialconsole"]:
        return []

    # UEFI boots need the efi video console; BIOS boots use vidconsole.
    booted_uefi = sysctl.filter("machdep.bootmethod")[0].value == "UEFI"
    videoconsole = "efi" if booted_uefi else "vidconsole"

    return [
        f'comconsole_port="{advanced["serialport"]}"',
        f'comconsole_speed="{advanced["serialspeed"]}"',
        'boot_multicons="YES"',
        'boot_serial="YES"',
        f'console="comconsole,{videoconsole}"',
    ]
def test_sysctl_setvalue(self):
    """
    Writing a sysctl through the library must match the behaviour of
    /sbin/sysctl: both succeed as root, and as a non-root user the
    failures are tolerated.
    """
    dummy = sysctl.filter('kern.dummy')[0]
    try:
        self.command("/sbin/sysctl kern.dummy=0")
    except:  # command failure is expected for non-root users
        if os.getuid() == 0:
            # Root must be able to write via the command-line tool.
            raise
    try:
        dummy.value = 1
    except TypeError:
        if os.getuid() == 0:
            # Root must be able to write via the library as well.
            raise
    if os.getuid() == 0:
        # Read the value back to confirm the library write took effect.
        value = int(self.command("/sbin/sysctl -n kern.dummy"))
        if value != 1:
            raise ValueError("Failed to set kern.dummy")
def read(self):
    """
    Collect the temperature of every non-CD disk in parallel and
    dispatch each reading to collectd; per-disk failures are logged
    and skipped.
    """
    disks = sysctl.filter('kern.disks')[0].value.split()
    pending = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
        for name in disks:
            # Optical drives have no useful temperature sensor.
            if name.startswith('cd'):
                continue
            pending[pool.submit(self.get_temperature, name)] = name
        for done in concurrent.futures.as_completed(pending.keys()):
            name = pending.get(done)
            if not name:
                continue
            try:
                reading = done.result()
                if reading is None:
                    continue
                self.dispatch_value(name, 'temperature', reading, data_type='temperature')
            except Exception:
                collectd.info(traceback.format_exc())