def __update(self):
    """Load the IRQ file and update the internal dict.

    Returns self.stats: a list of dict, one per IRQ line, each holding
    the IRQ name, its rate (interrupts per second) and the elapsed time
    used for the rate computation.
    """
    self.reset()

    if not os.path.exists(self.IRQ_FILE):
        # Correct issue #947: IRQ file do not exist on OpenVZ container
        return self.stats

    try:
        with open(self.IRQ_FILE) as irq_proc:
            time_since_update = getTimeSinceLastUpdate('irq')
            # Read the header
            self.__header(irq_proc.readline())
            # Read the rest of the lines (one line per IRQ)
            for line in irq_proc.readlines():
                irq_line = self.__humanname(line)
                current_irqs = self.__sum(line)
                # BUG FIX: the original expression
                #     int(current_irqs - last if last else 0 // time_since_update)
                # parsed as a conditional expression (ternary binds looser
                # than '-' and '//'), so the counter delta was NEVER divided
                # by the elapsed time. Compute the per-second rate explicitly.
                last_irqs = self.lasts.get(irq_line)
                if last_irqs is None:
                    # First sighting of this IRQ line: no delta available yet
                    irq_rate = 0
                else:
                    # time_since_update is the elapsed wall time since the
                    # previous grab (assumed > 0 after the first call)
                    irq_rate = int((current_irqs - last_irqs) // time_since_update)
                irq_current = {
                    'irq_line': irq_line,
                    'irq_rate': irq_rate,
                    'key': self.get_key(),
                    'time_since_update': time_since_update
                }
                self.stats.append(irq_current)
                # Remember the cumulative counter for the next delta
                self.lasts[irq_line] = current_irqs
    except (OSError, IOError):
        # Best-effort: unreadable IRQ file leaves stats empty
        pass

    return self.stats
def get_docker_network(self, container_id, all_stats):
    """Return the container network usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
    with:
        time_since_update: number of seconds elapsed between the latest grab
        rx: Number of byte received
        tx: Number of byte transmited
    """
    # Init the returned dict
    network_new = {}

    # Read the rx/tx stats (in bytes)
    try:
        netcounters = all_stats["networks"]
    except KeyError as e:
        # all_stats do not have NETWORK information
        logger.debug("Can not grab NET usage for container {0} ({1})".format(container_id, e))
        logger.debug(all_stats)
        # No fallback available...
        return network_new

    # Previous network interface stats are stored in the netcounters_old variable
    # BUG FIX: the original tested hasattr(self, 'inetcounters_old') (typo) —
    # the attribute actually set is 'netcounters_old', so the cache was wiped
    # on every call and rx/tx were always computed against the freshly stored
    # counters (i.e. always 0).
    if not hasattr(self, 'netcounters_old'):
        # First call, we init the netcounters_old var
        self.netcounters_old = {}
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.netcounters_old:
        # First time this container is seen: store its counters, no rate yet
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable Rx/s and Tx/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        try:
            network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{0}'.format(container_id))
            network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
            network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
            network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
            network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
        except KeyError as e:
            # BUG FIX: 'as e' was missing here, so logging 'e' below raised
            # NameError instead of the intended debug message.
            # all_stats do not have INTERFACE information
            logger.debug("Can not grab network interface usage for container {0} ({1})".format(container_id, e))
            logger.debug(all_stats)

    # Save stats to compute next bitrate
    self.netcounters_old[container_id] = netcounters

    # Return the stats
    return network_new
def update_local(self):
    """Update CPU stats using psutil.

    Returns a stats dict with the global 'total' percentage, the
    per-state percentages exposed by this platform, and (from the second
    call on) per-interval counter deltas from psutil.cpu_stats().
    """
    # Start from the plugin's empty stats template
    stats = self.get_init_value()

    # Global CPU usage (%), shared via the cpu_percent helper
    stats['total'] = cpu_percent.get()

    # Per-state CPU usage (%): user, system, idle, nice (UNIX),
    # iowait (Linux), irq (Linux, FreeBSD), steal (Linux 2.6.11+),
    # softirq (Linux), guest (Linux 2.6.24+), guest_nice (Linux 3.2.0+).
    # Only copy the fields psutil exposes on this platform.
    times_pct = psutil.cpu_times_percent(interval=0.0)
    for field in ('user', 'system', 'idle', 'nice', 'iowait',
                  'irq', 'softirq', 'steal', 'guest', 'guest_nice'):
        if hasattr(times_pct, field):
            stats[field] = getattr(times_pct, field)

    # Additional CPU stats (event counts, not %; psutil>=4.1.0):
    # ctx_switches, interrupts, soft_interrupts, syscalls — cumulative
    # since boot, so they are turned into per-interval deltas below.
    counters = psutil.cpu_stats()

    # Record the elapsed time now (the call also stamps "last grab" time
    # for the next invocation), so Rx/s-style rates can be derived by
    # API consumers.
    elapsed = getTimeSinceLastUpdate('cpu')

    if hasattr(self, 'cpu_stats_old'):
        # Derive per-interval event counts from the cumulative counters
        for field in counters._fields:
            current = getattr(counters, field)
            if current is not None:
                stats[field] = current - getattr(self.cpu_stats_old, field)
        stats['time_since_update'] = elapsed
    else:
        # First call: nothing to diff against yet
        self.cpu_stats_old = counters

    # Core number is needed to compute the CTX switch limit
    stats['cpucore'] = self.nb_log_core

    # Save stats to compute next step
    self.cpu_stats_old = counters

    return stats
def update(self):
    """Update the IRQ stats.

    Parses /proc/interrupts (Linux only), computes per-IRQ rates since
    the previous call and keeps the top 5 by rate in self.stats.
    """
    # Reset the list
    self.reset()

    if not LINUX:  # only available on GNU/Linux
        return self.stats

    if self.input_method == "local":
        with open("/proc/interrupts") as irq_proc:
            time_since_update = getTimeSinceLastUpdate("irq")
            irq_proc.readline()  # skip header line
            for irq_line in irq_proc.readlines():
                splitted_line = irq_line.split()
                irq_line = splitted_line[0].replace(":", "")
                current_irqs = sum(
                    [int(count) for count in splitted_line[1:] if count.isdigit()]
                )  # sum interrupts on all CPUs
                # BUG FIX: the original expression
                #     int(current_irqs - last if last else 0 // time_since_update)
                # parsed as a conditional expression, so the counter delta
                # was NEVER divided by the elapsed time (and the else branch
                # computed 0 // t). Compute the per-second rate explicitly.
                last_irqs = self.lasts.get(irq_line)
                if last_irqs is None:
                    # First sighting of this IRQ: no delta available yet
                    irq_rate = 0
                else:
                    irq_rate = int((current_irqs - last_irqs) // time_since_update)
                irq_current = {
                    "irq_line": irq_line,
                    "irq_rate": irq_rate,
                    "key": self.get_key(),
                    "time_since_update": time_since_update,
                }
                self.stats.append(irq_current)
                # Remember the cumulative counter for the next delta
                self.lasts[irq_line] = current_irqs
    elif self.input_method == "snmp":
        # not available
        pass

    # Update the view
    self.update_views()

    # Keep only the 5 busiest IRQ lines, sorted by rate descending
    self.stats = sorted(self.stats,
                        key=operator.itemgetter("irq_rate"),
                        reverse=True)[:5]  # top 5 IRQ by rate/s

    return self.stats
def update(self):
    """Update network stats using the input method.

    Stats is a list of dict (one dict per interface).
    Rates (rx/tx/cx) are byte deltas since the previous call; divide by
    'time_since_update' to obtain bytes per second.
    """
    # Reset stats
    self.reset()

    if self.input_method == 'local':
        # Update stats using the standard system lib

        # Grab network interface stat using the PsUtil net_io_counter method
        try:
            netiocounters = psutil.net_io_counters(pernic=True)
        except UnicodeDecodeError:
            # Best-effort: undecodable interface names abort the grab
            return self.stats

        # New in PsUtil 3.0
        # - import the interface's status (issue #765)
        # - import the interface's speed (issue #718)
        netstatus = {}
        try:
            netstatus = psutil.net_if_stats()
        except AttributeError:
            # psutil < 3.0: no net_if_stats(), optional fields are skipped
            pass

        # Previous network interface stats are stored in the network_old variable
        if not hasattr(self, 'network_old'):
            # First call, we init the network_old var: no rates can be
            # computed yet, stats stay empty for this cycle
            try:
                self.network_old = netiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            time_since_update = getTimeSinceLastUpdate('net')

            # Loop over interfaces
            network_new = netiocounters
            for net in network_new:
                # Do not take hidden interface into account
                if self.is_hide(net):
                    continue
                try:
                    cumulative_rx = network_new[net].bytes_recv
                    cumulative_tx = network_new[net].bytes_sent
                    cumulative_cx = cumulative_rx + cumulative_tx
                    # Byte deltas since the previous grab (KeyError if the
                    # interface appeared since then -> skip this cycle)
                    rx = cumulative_rx - self.network_old[net].bytes_recv
                    tx = cumulative_tx - self.network_old[net].bytes_sent
                    cx = rx + tx
                    netstat = {
                        'interface_name': net,
                        'time_since_update': time_since_update,
                        'cumulative_rx': cumulative_rx,
                        'rx': rx,
                        'cumulative_tx': cumulative_tx,
                        'tx': tx,
                        'cumulative_cx': cumulative_cx,
                        'cx': cx}
                except KeyError:
                    continue
                else:
                    # Optional stats (only compliant with PsUtil 3.0+)
                    # Interface status
                    try:
                        netstat['is_up'] = netstatus[net].isup
                    except (KeyError, AttributeError):
                        pass
                    # Interface speed in Mbps, convert it to bps
                    # Can be always 0 on some OS
                    # NOTE(review): factor is 1048576 (1024*1024), i.e. a
                    # binary mega — confirm this is the intended Mbps->bps
                    # conversion rather than 1_000_000
                    try:
                        netstat['speed'] = netstatus[net].speed * 1048576
                    except (KeyError, AttributeError):
                        pass
                    # Finally, set the key
                    netstat['key'] = self.get_key()
                    self.stats.append(netstat)

            # Save stats to compute next bitrate
            self.network_old = network_new

    elif self.input_method == 'snmp':
        # Update stats using SNMP

        # SNMP bulk command to get all network interface in one shot
        try:
            netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                bulk=True)
        except KeyError:
            # No OID table for this system: fall back to the default OIDs
            netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid['default'],
                                                bulk=True)

        # Previous network interface stats are stored in the network_old variable
        if not hasattr(self, 'network_old'):
            # First call, we init the network_old var
            try:
                self.network_old = netiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # See description in the 'local' block
            time_since_update = getTimeSinceLastUpdate('net')

            # Loop over interfaces
            network_new = netiocounters
            for net in network_new:
                # Do not take hidden interface into account
                if self.is_hide(net):
                    continue
                try:
                    # Windows: a tips is needed to convert HEX to TXT
                    # http://blogs.technet.com/b/networking/archive/2009/12/18/how-to-query-the-list-of-network-interfaces-using-snmp-via-the-ifdescr-counter.aspx
                    if self.short_system_name == 'windows':
                        try:
                            interface_name = str(base64.b16decode(net[2:-2].upper()))
                        except TypeError:
                            interface_name = net
                    else:
                        interface_name = net

                    # SNMP counters come back as strings: convert to float
                    cumulative_rx = float(network_new[net]['cumulative_rx'])
                    cumulative_tx = float(network_new[net]['cumulative_tx'])
                    cumulative_cx = cumulative_rx + cumulative_tx
                    rx = cumulative_rx - float(self.network_old[net]['cumulative_rx'])
                    tx = cumulative_tx - float(self.network_old[net]['cumulative_tx'])
                    cx = rx + tx
                    netstat = {
                        'interface_name': interface_name,
                        'time_since_update': time_since_update,
                        'cumulative_rx': cumulative_rx,
                        'rx': rx,
                        'cumulative_tx': cumulative_tx,
                        'tx': tx,
                        'cumulative_cx': cumulative_cx,
                        'cx': cx}
                except KeyError:
                    continue
                else:
                    netstat['key'] = self.get_key()
                    self.stats.append(netstat)

            # Save stats to compute next bitrate
            self.network_old = network_new

    return self.stats
def update(self):
    """Update network stats using the input method.

    Stats is a list of dict (one dict per interface).
    Rates (rx/tx/cx) are byte deltas since the previous call; divide by
    'time_since_update' to obtain bytes per second.
    """
    # Init new stats
    stats = self.get_init_value()

    if self.input_method == 'local':
        # Update stats using the standard system lib

        # Grab network interface stat using the psutil net_io_counter method
        try:
            netiocounters = psutil.net_io_counters(pernic=True)
        except UnicodeDecodeError as e:
            logger.debug('Can not get network interface counters ({})'.format(e))
            return self.stats

        # Grab interface's status (issue #765)
        # Grab interface's speed (issue #718)
        netstatus = {}
        try:
            netstatus = psutil.net_if_stats()
        except OSError as e:
            # see psutil #797/glances #1106
            logger.debug('Can not get network interface status ({})'.format(e))

        # Previous network interface stats are stored in the network_old variable
        if not hasattr(self, 'network_old'):
            # First call, we init the network_old var: no rates can be
            # computed yet, so return immediately (guard clause)
            try:
                self.network_old = netiocounters
            except (IOError, UnboundLocalError):
                pass
            return self.stats

        # By storing time data we enable Rx/s and Tx/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        time_since_update = getTimeSinceLastUpdate('net')

        # Loop over interfaces
        network_new = netiocounters
        for net in network_new:
            # Do not take hidden interface into account
            # or KeyError: 'eth0' when interface is not connected #1348
            if self.is_hide(net) or net not in netstatus:
                continue
            try:
                cumulative_rx = network_new[net].bytes_recv
                cumulative_tx = network_new[net].bytes_sent
                cumulative_cx = cumulative_rx + cumulative_tx
                # Byte deltas since the previous grab (KeyError if the
                # interface appeared since then -> skip this cycle)
                rx = cumulative_rx - self.network_old[net].bytes_recv
                tx = cumulative_tx - self.network_old[net].bytes_sent
                cx = rx + tx
                netstat = {'interface_name': n(net),
                           'time_since_update': time_since_update,
                           'cumulative_rx': cumulative_rx,
                           'rx': rx,
                           'cumulative_tx': cumulative_tx,
                           'tx': tx,
                           'cumulative_cx': cumulative_cx,
                           'cx': cx,
                           # Interface status
                           'is_up': netstatus[net].isup,
                           # Interface speed in Mbps, convert it to bps
                           # Can be always 0 on some OSes
                           # NOTE(review): factor is 1048576 (1024*1024) —
                           # confirm this binary-mega conversion is intended
                           'speed': netstatus[net].speed * 1048576,
                           # Set the key for the dict
                           'key': self.get_key()
                           }
            except KeyError:
                continue
            else:
                # Append the interface stats to the list
                stats.append(netstat)

        # Save stats to compute next bitrate
        self.network_old = network_new

    elif self.input_method == 'snmp':
        # Update stats using SNMP

        # SNMP bulk command to get all network interface in one shot
        try:
            netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                bulk=True)
        except KeyError:
            # No OID table for this system: fall back to the default OIDs
            netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid['default'],
                                                bulk=True)

        # Previous network interface stats are stored in the network_old variable
        if not hasattr(self, 'network_old'):
            # First call, we init the network_old var
            try:
                self.network_old = netiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # See description in the 'local' block
            time_since_update = getTimeSinceLastUpdate('net')

            # Loop over interfaces
            network_new = netiocounters
            for net in network_new:
                # Do not take hidden interface into account
                if self.is_hide(net):
                    continue
                try:
                    # Windows: a tips is needed to convert HEX to TXT
                    # http://blogs.technet.com/b/networking/archive/2009/12/18/how-to-query-the-list-of-network-interfaces-using-snmp-via-the-ifdescr-counter.aspx
                    if self.short_system_name == 'windows':
                        try:
                            interface_name = str(base64.b16decode(net[2:-2].upper()))
                        except TypeError:
                            interface_name = net
                    else:
                        interface_name = net

                    # SNMP counters come back as strings: convert to float
                    cumulative_rx = float(network_new[net]['cumulative_rx'])
                    cumulative_tx = float(network_new[net]['cumulative_tx'])
                    cumulative_cx = cumulative_rx + cumulative_tx
                    rx = cumulative_rx - float(self.network_old[net]['cumulative_rx'])
                    tx = cumulative_tx - float(self.network_old[net]['cumulative_tx'])
                    cx = rx + tx
                    netstat = {
                        'interface_name': interface_name,
                        'time_since_update': time_since_update,
                        'cumulative_rx': cumulative_rx,
                        'rx': rx,
                        'cumulative_tx': cumulative_tx,
                        'tx': tx,
                        'cumulative_cx': cumulative_cx,
                        'cx': cx}
                except KeyError:
                    continue
                else:
                    netstat['key'] = self.get_key()
                    stats.append(netstat)

            # Save stats to compute next bitrate
            self.network_old = network_new

    # Update the stats
    self.stats = stats

    return self.stats
def update(self):
    """Update disk I/O stats using the input method.

    Stats is a list of dict (one dict per disk device). Counts and byte
    values are deltas since the previous call; divide by
    'time_since_update' to obtain per-second rates.
    """
    # Init new stats
    stats = self.get_init_value()

    if self.input_method == 'local':
        # Update stats using the standard system lib
        # Grab the stat using the psutil disk_io_counters method
        # read_count: number of reads
        # write_count: number of writes
        # read_bytes: number of bytes read
        # write_bytes: number of bytes written
        # read_time: time spent reading from disk (in milliseconds)
        # write_time: time spent writing to disk (in milliseconds)
        try:
            diskiocounters = psutil.disk_io_counters(perdisk=True)
        except Exception:
            # Best-effort: any psutil failure leaves stats empty this cycle
            return stats

        # Previous disk IO stats are stored in the diskio_old variable
        if not hasattr(self, 'diskio_old'):
            # First call, we init the diskio_old var: no deltas yet
            try:
                self.diskio_old = diskiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            time_since_update = getTimeSinceLastUpdate('disk')

            diskio_new = diskiocounters
            for disk in diskio_new:
                # By default, RamFS is not displayed (issue #714)
                if self.args is not None and not self.args.diskio_show_ramfs and disk.startswith('ram'):
                    continue

                # Do not take hide disk into account
                if self.is_hide(disk):
                    continue

                # Compute count and bit rate
                try:
                    # Deltas since the previous grab (KeyError if the disk
                    # appeared since then -> skip this cycle)
                    read_count = (diskio_new[disk].read_count -
                                  self.diskio_old[disk].read_count)
                    write_count = (diskio_new[disk].write_count -
                                   self.diskio_old[disk].write_count)
                    read_bytes = (diskio_new[disk].read_bytes -
                                  self.diskio_old[disk].read_bytes)
                    write_bytes = (diskio_new[disk].write_bytes -
                                   self.diskio_old[disk].write_bytes)
                    diskstat = {
                        'time_since_update': time_since_update,
                        'disk_name': n(disk),
                        'read_count': read_count,
                        'write_count': write_count,
                        'read_bytes': read_bytes,
                        'write_bytes': write_bytes}
                    # Add alias if exist (define in the configuration file)
                    if self.has_alias(disk) is not None:
                        diskstat['alias'] = self.has_alias(disk)
                except KeyError:
                    continue
                else:
                    diskstat['key'] = self.get_key()
                    stats.append(diskstat)

            # Save stats to compute next bitrate
            self.diskio_old = diskio_new
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # No standard way for the moment...
        pass

    # Update the stats
    self.stats = stats

    return self.stats
def get_docker_io(self, container_id, all_stats):
    """Return the container IO usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
    with:
        time_since_update: number of seconds elapsed between the latest grab
        ior: Number of byte readed
        iow: Number of byte written

    Returns an empty dict when the needed counters are missing from
    all_stats or on the first call for a given container.
    """
    # Init the returned dict
    io_new = {}

    # Read the ior/iow stats (in bytes)
    try:
        iocounters = all_stats["blkio_stats"]
    except KeyError as e:
        # all_stats do not have io information
        logger.debug(
            "Cannot grab block IO usage for container {} ({})".format(
                container_id, e))
        logger.debug(all_stats)
        # No fallback available...
        return io_new

    # Previous io interface stats are stored in the iocounters_old variable
    if not hasattr(self, 'iocounters_old'):
        # First call ever: create the per-container cache
        self.iocounters_old = {}
        try:
            self.iocounters_old[container_id] = iocounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.iocounters_old:
        # First call for this container: store counters, no rate yet
        try:
            self.iocounters_old[container_id] = iocounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable IoR/s and IoW/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        try:
            # Read IOR and IOW value in the structure list of dict
            # (io_service_bytes_recursive is a list of {'op': ..., 'value': ...})
            ior = [
                i for i in iocounters['io_service_bytes_recursive']
                if i['op'] == 'Read'
            ][0]['value']
            iow = [
                i for i in iocounters['io_service_bytes_recursive']
                if i['op'] == 'Write'
            ][0]['value']
            ior_old = [
                i for i in self.iocounters_old[container_id]
                ['io_service_bytes_recursive'] if i['op'] == 'Read'
            ][0]['value']
            iow_old = [
                i for i in self.iocounters_old[container_id]
                ['io_service_bytes_recursive'] if i['op'] == 'Write'
            ][0]['value']
        except (IndexError, KeyError) as e:
            # all_stats do not have io information
            # (IndexError: no 'Read'/'Write' entry in the list)
            logger.debug(
                "Cannot grab block IO usage for container {} ({})".format(
                    container_id, e))
        else:
            io_new['time_since_update'] = getTimeSinceLastUpdate(
                'docker_io_{}'.format(container_id))
            # Deltas since the previous grab, plus the cumulative values
            io_new['ior'] = ior - ior_old
            io_new['iow'] = iow - iow_old
            io_new['cumulative_ior'] = ior
            io_new['cumulative_iow'] = iow

    # Save stats to compute next bitrate
    self.iocounters_old[container_id] = iocounters

    # Return the stats
    return io_new
def get_docker_network(self, container_id, all_stats):
    """Return the container network usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
    with:
        time_since_update: number of seconds elapsed between the latest grab
        rx: Number of byte received
        tx: Number of byte transmited
    """
    # Init the returned dict
    network_new = {}

    # Read the rx/tx stats (in bytes)
    try:
        netcounters = all_stats["networks"]
    except KeyError as e:
        # all_stats do not have NETWORK information
        logger.debug("Cannot grab NET usage for container {} ({})".format(
            container_id, e))
        logger.debug(all_stats)
        # No fallback available...
        return network_new

    # Previous network interface stats are stored in the netcounters_old variable
    # BUG FIX: the original tested hasattr(self, 'inetcounters_old') (typo) —
    # the attribute actually set is 'netcounters_old', so the cache was wiped
    # on every call and rx/tx were always computed against the freshly stored
    # counters (i.e. always 0).
    if not hasattr(self, 'netcounters_old'):
        # First call, we init the netcounters_old var
        self.netcounters_old = {}
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.netcounters_old:
        # First time this container is seen: store its counters, no rate yet
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable Rx/s and Tx/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        try:
            network_new['time_since_update'] = getTimeSinceLastUpdate(
                'docker_net_{}'.format(container_id))
            network_new['rx'] = netcounters["eth0"][
                "rx_bytes"] - self.netcounters_old[container_id]["eth0"][
                    "rx_bytes"]
            network_new['tx'] = netcounters["eth0"][
                "tx_bytes"] - self.netcounters_old[container_id]["eth0"][
                    "tx_bytes"]
            network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
            network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
        except KeyError as e:
            # all_stats do not have INTERFACE information
            logger.debug(
                "Cannot grab network interface usage for container {} ({})"
                .format(container_id, e))
            logger.debug(all_stats)

    # Save stats to compute next bitrate
    self.netcounters_old[container_id] = netcounters

    # Return the stats
    return network_new
def update(self):
    """Update the processes stats.

    Grabs the per-process stats with psutil.process_iter, filters out
    OS/kernel/user-excluded processes, sorts by the current sort key and
    enriches each entry with IO rates; the top process additionally gets
    extended stats (affinity, ionice, fds/handles, swap, connections).
    """
    # Reset the stats
    self.processlist = []
    self.reset_processcount()

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Time since last update (for disk_io rate computation)
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Grab standard stats
    #####################
    standard_attrs = ['cmdline', 'cpu_percent', 'cpu_times', 'memory_info',
                      'memory_percent', 'name', 'nice', 'pid', 'ppid',
                      'status', 'username', 'status', 'num_threads']
    # io_counters availability: Linux, BSD, Windows, AIX
    if not MACOS and not SUNOS and not WSL:
        standard_attrs += ['io_counters']
    # gids availability: Unix
    if not WINDOWS:
        standard_attrs += ['gids']

    # and build the processes stats list (psutil>=5.3.0)
    self.processlist = [p.info for p in psutil.process_iter(attrs=standard_attrs,
                                                            ad_value=None)
                        # OS-related processes filter
                        if not (BSD and p.info['name'] == 'idle') and
                        not (WINDOWS and p.info['name'] == 'System Idle Process') and
                        not (MACOS and p.info['name'] == 'kernel_task') and
                        # Kernel threads filter
                        not (self.no_kernel_threads and LINUX and p.info['gids'].real == 0) and
                        # User filter
                        not (self._filter.is_filtered(p.info))]

    # Sort the processes list by the current sort_key
    self.processlist = sort_stats(self.processlist,
                                  sortedby=self.sort_key,
                                  reverse=True)

    # Update the processcount
    self.update_processcount(self.processlist)

    # Loop over processes and add metadata
    first = True
    for proc in self.processlist:
        # Get extended stats, only for top processes (see issue #403).
        if first and not self.disable_extended_tag:
            # - cpu_affinity (Linux, Windows, FreeBSD)
            # - ionice (Linux and Windows > Vista)
            # - num_ctx_switches (not available on Illumos/Solaris)
            # - num_fds (Unix-like)
            # - num_handles (Windows)
            # - memory_maps (only swap, Linux)
            #   https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
            # - connections (TCP and UDP)
            extended = {}
            try:
                top_process = psutil.Process(proc['pid'])
                extended_stats = ['cpu_affinity', 'ionice',
                                  'num_ctx_switches']
                if LINUX:
                    # num_fds only avalable on Unix system (see issue #1351)
                    extended_stats += ['num_fds']
                if WINDOWS:
                    extended_stats += ['num_handles']

                # Get the extended stats
                extended = top_process.as_dict(attrs=extended_stats,
                                               ad_value=None)

                if LINUX:
                    try:
                        extended['memory_swap'] = sum([v.swap for v in top_process.memory_maps()])
                    except psutil.NoSuchProcess:
                        pass
                    except (psutil.AccessDenied, NotImplementedError):
                        # NotImplementedError: /proc/${PID}/smaps file doesn't exist
                        # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
                        # is not enabled (see psutil #533/glances #413).
                        extended['memory_swap'] = None
                try:
                    extended['tcp'] = len(top_process.connections(kind="tcp"))
                    extended['udp'] = len(top_process.connections(kind="udp"))
                except (psutil.AccessDenied, psutil.NoSuchProcess):
                    # Manage issue1283 (psutil.AccessDenied)
                    extended['tcp'] = None
                    extended['udp'] = None
            except (psutil.NoSuchProcess, ValueError, AttributeError) as e:
                logger.error('Can not grab extended stats ({})'.format(e))
                extended['extended_stats'] = False
            else:
                logger.debug('Grab extended stats for process {}'.format(proc['pid']))
                extended['extended_stats'] = True
            proc.update(extended)
        first = False
        # /End of extended stats

        # Time since last update (for disk_io rate computation)
        proc['time_since_update'] = time_since_update

        # Process status (only keep the first char)
        proc['status'] = str(proc['status'])[:1].upper()

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied or first time (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        if 'io_counters' in proc and proc['io_counters'] is not None:
            io_new = [proc['io_counters'].read_bytes,
                      proc['io_counters'].write_bytes]
            # For IO rate computation
            # Append saved IO r/w bytes
            try:
                proc['io_counters'] = io_new + self.io_old[proc['pid']]
                io_tag = 1
            except KeyError:
                # First time this pid is seen: no previous values yet
                proc['io_counters'] = io_new + [0, 0]
                io_tag = 0
            # then save the IO r/w bytes
            self.io_old[proc['pid']] = io_new
        else:
            proc['io_counters'] = [0, 0] + [0, 0]
            io_tag = 0
        # Append the IO tag (for display)
        proc['io_counters'] += [io_tag]

    # Compute the maximum value for keys in self._max_values_list: CPU, MEM
    # Usefull to highlight the processes with maximum values
    for k in self._max_values_list:
        values_list = [i[k] for i in self.processlist if i[k] is not None]
        if values_list != []:
            self.set_max_values(k, max(values_list))
def update(self):
    """Update the processes stats.

    Builds an internal {process: stats} dict with mandatory stats,
    updates the global process/thread counters, then either builds a
    process tree or a flat list limited to max_processes, adding
    standard (and for the top process, extended) stats.
    """
    # Reset the stats
    self.processlist = []
    self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Get the time since last update
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Build an internal dict with only mandatories stats (sort keys)
    processdict = {}
    excluded_processes = set()
    for proc in psutil.process_iter():
        # Ignore kernel threads if needed
        if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
            continue

        # If self.max_processes is None: Only retreive mandatory stats
        # Else: retreive mandatory and standard stats
        s = self.__get_process_stats(proc,
                                     mandatory_stats=True,
                                     standard_stats=self.max_processes is None)
        # Continue to the next process if it has to be filtered
        if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
            excluded_processes.add(proc)
            continue
        # Ok add the process to the list
        processdict[proc] = s
        # ignore the 'idle' process on Windows and *BSD
        # ignore the 'kernel_task' process on OS X
        # waiting for upstream patch from psutil
        # NOTE(review): the process was already added to processdict above;
        # this 'continue' only skips the counters below — confirm intended
        if (BSD and processdict[proc]['name'] == 'idle' or
                WINDOWS and processdict[proc]['name'] == 'System Idle Process' or
                OSX and processdict[proc]['name'] == 'kernel_task'):
            continue
        # Update processcount (global statistics)
        try:
            self.processcount[str(proc.status())] += 1
        except KeyError:
            # Key did not exist, create it
            try:
                self.processcount[str(proc.status())] = 1
            except psutil.NoSuchProcess:
                pass
        except psutil.NoSuchProcess:
            pass
        else:
            self.processcount['total'] += 1
        # Update thread number (global statistics)
        try:
            self.processcount['thread'] += proc.num_threads()
        except Exception:
            pass

    if self._enable_tree:
        self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                       self.sort_key,
                                                       self.sort_reverse,
                                                       self.no_kernel_threads,
                                                       excluded_processes)

        for i, node in enumerate(self.process_tree):
            # Only retreive stats for visible processes (max_processes)
            if self.max_processes is not None and i >= self.max_processes:
                break

            # add standard stats
            new_stats = self.__get_process_stats(node.process,
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=False)
            if new_stats is not None:
                node.stats.update(new_stats)

            # Add a specific time_since_update stats for bitrate
            node.stats['time_since_update'] = time_since_update
    else:
        # Process optimization
        # Only retreive stats for visible processes (max_processes)
        if self.max_processes is not None:
            # Sort the internal dict and cut the top N (Return a list of tuple)
            # tuple=key (proc), dict (returned by __get_process_stats)
            try:
                processiter = sorted(iteritems(processdict),
                                     key=lambda x: x[1][self.sort_key],
                                     reverse=self.sort_reverse)
            except (KeyError, TypeError) as e:
                logger.error("Cannot sort process list by {0}: {1}".format(self.sort_key, e))
                logger.error('{0}'.format(listitems(processdict)[0]))
                # Fallback to all process (issue #423)
                processloop = iteritems(processdict)
                first = False
            else:
                processloop = processiter[0:self.max_processes]
                first = True
        else:
            # Get all processes stats
            processloop = iteritems(processdict)
            first = False

        for i in processloop:
            # Already existing mandatory stats
            procstat = i[1]
            if self.max_processes is not None:
                # Update with standard stats
                # and extended stats but only for TOP (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
            # Add a specific time_since_update stats for bitrate
            procstat['time_since_update'] = time_since_update
            # Update process list
            self.processlist.append(procstat)
            # Next...
            first = False

    # Build the all processes list used by the monitored list
    self.allprocesslist = itervalues(processdict)

    # Clean internals caches if timeout is reached
    if self.cache_timer.finished():
        self.username_cache = {}
        self.cmdline_cache = {}
        # Restart the timer
        self.cache_timer.reset()
def update(self):
    """Update the processes stats.

    Builds an internal {process: stats} dict with mandatory stats,
    updates the global process/thread counters and pid_max, then either
    builds a process tree or a flat list limited to max_processes,
    adding standard (and for the top process, extended) stats.
    """
    # Reset the stats
    self.processlist = []
    self.reset_processcount()

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Get the time since last update
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Reset the max dict
    self.reset_max_values()

    # Update the maximum process ID (pid) number
    self.processcount['pid_max'] = self.pid_max

    # Build an internal dict with only mandatories stats (sort keys)
    processdict = {}
    excluded_processes = set()
    for proc in psutil.process_iter():
        # Ignore kernel threads if needed
        if self.no_kernel_threads and not WINDOWS and is_kernel_thread(
                proc):
            continue

        # If self.max_processes is None: Only retrieve mandatory stats
        # Else: retrieve mandatory and standard stats
        s = self.__get_process_stats(
            proc,
            mandatory_stats=True,
            standard_stats=self.max_processes is None)
        # Check if s is note None (issue #879)
        # ignore the 'idle' process on Windows and *BSD
        # ignore the 'kernel_task' process on macOS
        # waiting for upstream patch from psutil
        if (s is None or
                BSD and s['name'] == 'idle' or
                WINDOWS and s['name'] == 'System Idle Process' or
                MACOS and s['name'] == 'kernel_task'):
            continue
        # Continue to the next process if it has to be filtered
        if self._filter.is_filtered(s):
            excluded_processes.add(proc)
            continue

        # Ok add the process to the list
        processdict[proc] = s
        # Update processcount (global statistics)
        try:
            self.processcount[str(proc.status())] += 1
        except KeyError:
            # Key did not exist, create it
            try:
                self.processcount[str(proc.status())] = 1
            except psutil.NoSuchProcess:
                pass
        except psutil.NoSuchProcess:
            pass
        else:
            self.processcount['total'] += 1
        # Update thread number (global statistics)
        try:
            self.processcount['thread'] += proc.num_threads()
        except Exception:
            pass

    if self._enable_tree:
        self.process_tree = ProcessTreeNode.build_tree(
            processdict,
            self.sort_key,
            self.sort_reverse,
            self.no_kernel_threads,
            excluded_processes)

        for i, node in enumerate(self.process_tree):
            # Only retreive stats for visible processes (max_processes)
            if self.max_processes is not None and i >= self.max_processes:
                break

            # add standard stats
            new_stats = self.__get_process_stats(node.process,
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=False)
            if new_stats is not None:
                node.stats.update(new_stats)

            # Add a specific time_since_update stats for bitrate
            node.stats['time_since_update'] = time_since_update
    else:
        # Process optimization
        # Only retreive stats for visible processes (max_processes)
        if self.max_processes is not None:
            # Sort the internal dict and cut the top N (Return a list of tuple)
            # tuple=key (proc), dict (returned by __get_process_stats)
            try:
                processiter = sorted(iteritems(processdict),
                                     key=lambda x: x[1][self.sort_key],
                                     reverse=self.sort_reverse)
            except (KeyError, TypeError) as e:
                logger.error("Cannot sort process list by {}: {}".format(
                    self.sort_key, e))
                logger.error('{}'.format(listitems(processdict)[0]))
                # Fallback to all process (issue #423)
                processloop = iteritems(processdict)
                first = False
            else:
                processloop = processiter[0:self.max_processes]
                first = True
        else:
            # Get all processes stats
            processloop = iteritems(processdict)
            first = False

        for i in processloop:
            # Already existing mandatory stats
            procstat = i[1]
            if self.max_processes is not None:
                # Update with standard stats
                # and extended stats but only for TOP (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
            # Add a specific time_since_update stats for bitrate
            procstat['time_since_update'] = time_since_update
            # Update process list
            self.processlist.append(procstat)
            # Next...
            first = False

    # Build the all processes list used by the AMPs
    self.allprocesslist = [p for p in itervalues(processdict)]

    # Clean internals caches if timeout is reached
    if self.cache_timer.finished():
        self.username_cache = {}
        self.cmdline_cache = {}
        # Restart the timer
        self.cache_timer.reset()
def update(self):
    """Update network stats using the input method.

    Stats is a list of dict (one dict per interface), stored in self.stats
    and also returned.  Per-second rates are left to API clients: each dict
    carries raw byte deltas plus 'time_since_update'.
    """
    # Reset stats
    self.reset()

    if self.input_method == "local":
        # Update stats using the standard system lib
        # Grab network interface stat using the PsUtil net_io_counter method
        try:
            netiocounters = psutil.net_io_counters(pernic=True)
        except UnicodeDecodeError:
            # Best effort: some interface names cannot be decoded
            return self.stats

        # New in PsUtil 3.0: optionally import the interface's status (issue #765)
        netstatus = {}
        try:
            netstatus = psutil.net_if_stats()
        except AttributeError:
            # Older PsUtil without net_if_stats: 'is_up' will be omitted
            pass

        # Previous network interface stats are stored in the network_old variable
        if not hasattr(self, "network_old"):
            # First call, we init the network_old var
            # (no rates can be computed with a single sample)
            try:
                self.network_old = netiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            time_since_update = getTimeSinceLastUpdate("net")

            # Loop over interfaces
            network_new = netiocounters
            for net in network_new:
                # Do not take hidden interface into account
                if self.is_hide(net):
                    continue
                try:
                    cumulative_rx = network_new[net].bytes_recv
                    cumulative_tx = network_new[net].bytes_sent
                    cumulative_cx = cumulative_rx + cumulative_tx
                    # Deltas since the previous grab; KeyError means the
                    # interface is new since last sample -> skip it this time
                    rx = cumulative_rx - self.network_old[net].bytes_recv
                    tx = cumulative_tx - self.network_old[net].bytes_sent
                    cx = rx + tx
                    netstat = {
                        "interface_name": net,
                        "time_since_update": time_since_update,
                        "cumulative_rx": cumulative_rx,
                        "rx": rx,
                        "cumulative_tx": cumulative_tx,
                        "tx": tx,
                        "cumulative_cx": cumulative_cx,
                        "cx": cx,
                    }
                except KeyError:
                    continue
                else:
                    # Optional stats (only compliant with PsUtil 3.0+)
                    try:
                        netstat["is_up"] = netstatus[net].isup
                    except (KeyError, AttributeError):
                        pass
                    # Set the key
                    netstat["key"] = self.get_key()
                    self.stats.append(netstat)

            # Save stats to compute next bitrate
            self.network_old = network_new

    elif self.input_method == "snmp":
        # Update stats using SNMP
        # SNMP bulk command to get all network interface in one shot
        try:
            netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                bulk=True)
        except KeyError:
            # Unknown system: fall back to the default OID table
            netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid["default"],
                                                bulk=True)

        # Previous network interface stats are stored in the network_old variable
        if not hasattr(self, "network_old"):
            # First call, we init the network_old var
            try:
                self.network_old = netiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # See description in the 'local' block
            time_since_update = getTimeSinceLastUpdate("net")

            # Loop over interfaces
            network_new = netiocounters
            for net in network_new:
                # Do not take hidden interface into account
                if self.is_hide(net):
                    continue
                try:
                    # Windows: a tips is needed to convert HEX to TXT
                    # http://blogs.technet.com/b/networking/archive/2009/12/18/how-to-query-the-list-of-network-interfaces-using-snmp-via-the-ifdescr-counter.aspx
                    if self.short_system_name == "windows":
                        try:
                            interface_name = str(base64.b16decode(net[2:-2].upper()))
                        except TypeError:
                            interface_name = net
                    else:
                        interface_name = net

                    cumulative_rx = float(network_new[net]["cumulative_rx"])
                    cumulative_tx = float(network_new[net]["cumulative_tx"])
                    cumulative_cx = cumulative_rx + cumulative_tx
                    rx = cumulative_rx - float(self.network_old[net]["cumulative_rx"])
                    tx = cumulative_tx - float(self.network_old[net]["cumulative_tx"])
                    cx = rx + tx
                    netstat = {
                        "interface_name": interface_name,
                        "time_since_update": time_since_update,
                        "cumulative_rx": cumulative_rx,
                        "rx": rx,
                        "cumulative_tx": cumulative_tx,
                        "tx": tx,
                        "cumulative_cx": cumulative_cx,
                        "cx": cx,
                    }
                except KeyError:
                    continue
                else:
                    netstat["key"] = self.get_key()
                    self.stats.append(netstat)

            # Save stats to compute next bitrate
            self.network_old = network_new

    # Update the history list
    self.update_stats_history(self.get_key())

    # Update the view
    self.update_views()

    return self.stats
def update(self):
    """Update the processes stats.

    Grab all processes in one psutil.process_iter() call (psutil>=5.3.0),
    filter OS-specific idle/kernel processes and user-filtered ones, sort
    the list by self.sort_key, enrich the top process with extended stats
    and attach IO rate bookkeeping to every entry.  Results are stored in
    self.processlist; global counters are refreshed via
    update_processcount().
    """
    # Reset the stats
    self.processlist = []
    self.reset_processcount()

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Time since last update (for disk_io rate computation)
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Grab standard stats
    #####################
    # Fix: 'status' was listed twice; each attribute only needs to be
    # requested once from psutil.
    standard_attrs = ['cmdline', 'cpu_percent', 'cpu_times', 'memory_info',
                      'memory_percent', 'name', 'nice', 'pid', 'ppid',
                      'status', 'username', 'num_threads']
    # io_counters availability: Linux, BSD, Windows, AIX
    if not MACOS and not SUNOS:
        standard_attrs += ['io_counters']
    # gids availability: Unix
    if not WINDOWS:
        standard_attrs += ['gids']

    # and build the processes stats list (psutil>=5.3.0)
    self.processlist = [p.info for p in psutil.process_iter(attrs=standard_attrs,
                                                            ad_value=None)
                        # OS-related processes filter
                        if not (BSD and p.info['name'] == 'idle') and
                        not (WINDOWS and p.info['name'] == 'System Idle Process') and
                        not (MACOS and p.info['name'] == 'kernel_task') and
                        # Kernel threads filter (real gid 0 heuristic on Linux)
                        not (self.no_kernel_threads and LINUX and p.info['gids'].real == 0) and
                        # User filter
                        not (self._filter.is_filtered(p.info))]

    # Sort the processes list by the current sort_key
    self.processlist = sort_stats(self.processlist,
                                  sortedby=self.sort_key,
                                  reverse=True)

    # Update the processcount
    self.update_processcount(self.processlist)

    # Loop over processes and add metadata
    first = True
    for proc in self.processlist:
        # Get extended stats, only for top processes (see issue #403).
        if first and not self.disable_extended_tag:
            # - cpu_affinity (Linux, Windows, FreeBSD)
            # - ionice (Linux and Windows > Vista)
            # - num_ctx_switches (not available on Illumos/Solaris)
            # - num_fds (Unix-like)
            # - num_handles (Windows)
            # - memory_maps (only swap, Linux)
            #   https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
            # - connections (TCP and UDP)
            extended = {}
            try:
                top_process = psutil.Process(proc['pid'])
                extended_stats = ['cpu_affinity', 'ionice',
                                  'num_ctx_switches']
                if LINUX:
                    # num_fds only available on Unix system (see issue #1351)
                    extended_stats += ['num_fds']
                if WINDOWS:
                    extended_stats += ['num_handles']

                # Get the extended stats
                extended = top_process.as_dict(attrs=extended_stats,
                                               ad_value=None)

                if LINUX:
                    try:
                        extended['memory_swap'] = sum([v.swap for v in top_process.memory_maps()])
                    except psutil.NoSuchProcess:
                        pass
                    except (psutil.AccessDenied, NotImplementedError):
                        # NotImplementedError: /proc/${PID}/smaps file doesn't exist
                        # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
                        # is not enabled (see psutil #533/glances #413).
                        extended['memory_swap'] = None
                try:
                    extended['tcp'] = len(top_process.connections(kind="tcp"))
                    extended['udp'] = len(top_process.connections(kind="udp"))
                except (psutil.AccessDenied, psutil.NoSuchProcess):
                    # Manage issue1283 (psutil.AccessDenied)
                    extended['tcp'] = None
                    extended['udp'] = None
            except (psutil.NoSuchProcess, ValueError, AttributeError) as e:
                logger.error('Can not grab extended stats ({})'.format(e))
                extended['extended_stats'] = False
            else:
                logger.debug('Grab extended stats for process {}'.format(proc['pid']))
                extended['extended_stats'] = True
            proc.update(extended)
            first = False
        # /End of extended stats

        # Time since last update (for disk_io rate computation)
        proc['time_since_update'] = time_since_update

        # Process status (only keep the first char)
        proc['status'] = str(proc['status'])[:1].upper()

        # Process IO
        # proc['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied or first time (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        if 'io_counters' in proc and proc['io_counters'] is not None:
            io_new = [proc['io_counters'].read_bytes,
                      proc['io_counters'].write_bytes]
            # For IO rate computation
            # Append saved IO r/w bytes
            try:
                proc['io_counters'] = io_new + self.io_old[proc['pid']]
                io_tag = 1
            except KeyError:
                # First grab for this pid: no previous sample
                proc['io_counters'] = io_new + [0, 0]
                io_tag = 0
            # then save the IO r/w bytes
            self.io_old[proc['pid']] = io_new
        else:
            proc['io_counters'] = [0, 0] + [0, 0]
            io_tag = 0
        # Append the IO tag (for display)
        proc['io_counters'] += [io_tag]

    # Compute the maximum value for keys in self._max_values_list: CPU, MEM
    # Useful to highlight the processes with maximum values
    for k in self._max_values_list:
        values_list = [i[k] for i in self.processlist if i[k] is not None]
        if values_list:
            self.set_max_values(k, max(values_list))
def get_docker_io(self, container_id, all_stats):
    """Return the container IO usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
    with:
        time_since_update: number of seconds elapsed between the latest grab
        ior: Number of bytes read
        iow: Number of bytes written

    The dict is empty when the API answer lacks block IO information or on
    the very first grab for this container.
    """
    io_new = {}

    # Block IO counters live under the 'blkio_stats' key of the API answer
    try:
        iocounters = all_stats["blkio_stats"]
    except KeyError as e:
        # all_stats do not have io information
        logger.debug("Can not grab block IO usage for container {0} ({1})".format(container_id, e))
        logger.debug(all_stats)
        # No fallback available...
        return io_new

    # Lazily create the per-container cache of previous counters
    if not hasattr(self, 'iocounters_old'):
        self.iocounters_old = {}
        try:
            self.iocounters_old[container_id] = iocounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.iocounters_old:
        # First grab for this container: just seed the cache
        try:
            self.iocounters_old[container_id] = iocounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable IoR/s and IoW/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        def op_value(counters, op_name):
            # Byte counter for the given operation ('Read' or 'Write');
            # raises IndexError/KeyError when absent, handled below.
            return [entry for entry in counters['io_service_bytes_recursive']
                    if entry['op'] == op_name][0]['value']

        try:
            ior = op_value(iocounters, 'Read')
            iow = op_value(iocounters, 'Write')
            ior_old = op_value(self.iocounters_old[container_id], 'Read')
            iow_old = op_value(self.iocounters_old[container_id], 'Write')
        except (IndexError, KeyError) as e:
            # all_stats do not have io information
            logger.debug("Cannot grab block IO usage for container {} ({})".format(container_id, e))
        else:
            io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
            io_new['ior'] = ior - ior_old
            io_new['iow'] = iow - iow_old
            io_new['cumulative_ior'] = ior
            io_new['cumulative_iow'] = iow

    # Save stats to compute next bitrate
    self.iocounters_old[container_id] = iocounters

    # Return the stats
    return io_new
def update(self):
    """Update disk I/O stats using the input method.

    Fills self.stats with one dict per disk (counter deltas since the
    previous grab plus 'time_since_update') and returns it.
    """
    # Reset stats
    self.reset()

    if self.input_method == 'local':
        # Grab the per-disk counters through PsUtil:
        # read_count / write_count: number of reads / writes
        # read_bytes / write_bytes: number of bytes read / written
        # read_time / write_time: time spent doing I/O (in milliseconds)
        try:
            counters = psutil.disk_io_counters(perdisk=True)
        except Exception:
            # Best effort: no disk I/O stats available on this system
            return self.stats

        if not hasattr(self, 'diskio_old'):
            # First call: only remember the counters, deltas need two samples
            try:
                self.diskio_old = counters
            except (IOError, UnboundLocalError):
                pass
        else:
            # Time elapsed since the previous grab: lets API clients derive
            # per-second rates from the raw deltas below
            time_since_update = getTimeSinceLastUpdate('disk')

            for disk, current in counters.items():
                # By default, RamFS is not displayed (issue #714)
                if self.args is not None and not self.args.diskio_show_ramfs and disk.startswith('ram'):
                    continue
                # Do not take hidden disk into account
                if self.is_hide(disk):
                    continue
                # A disk unseen at the previous grab has no delta yet
                try:
                    previous = self.diskio_old[disk]
                except KeyError:
                    continue
                diskstat = {
                    'time_since_update': time_since_update,
                    'disk_name': disk,
                    'read_count': current.read_count - previous.read_count,
                    'write_count': current.write_count - previous.write_count,
                    'read_bytes': current.read_bytes - previous.read_bytes,
                    'write_bytes': current.write_bytes - previous.write_bytes,
                    'key': self.get_key()}
                self.stats.append(diskstat)

            # Save stats to compute next deltas
            self.diskio_old = counters

    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # No standard way for the moment...
        pass

    # Update the history list
    self.update_stats_history('disk_name')

    # Update the view
    self.update_views()

    return self.stats
def update(self):
    """Update network stats using the input method.

    Stats is a list of dict (one dict per interface), stored in self.stats
    and returned.  Each dict carries raw byte deltas plus
    'time_since_update' so API clients can derive Rx/s and Tx/s.
    """
    # Reset stats
    self.reset()

    if self.input_method == 'local':
        # Grab the per-interface counters with PsUtil
        try:
            counters = psutil.net_io_counters(pernic=True)
        except UnicodeDecodeError:
            # Best effort: some interface names cannot be decoded
            return self.stats

        # New in PsUtil 3.0: optionally grab the interface's status (issue #765)
        if_status = {}
        try:
            if_status = psutil.net_if_stats()
        except AttributeError:
            # Older PsUtil: 'is_up' will simply be omitted
            pass

        if not hasattr(self, 'network_old'):
            # First call: remember the counters, rates need two samples
            try:
                self.network_old = counters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            time_since_update = getTimeSinceLastUpdate('net')

            for net, counter in counters.items():
                # Do not take hidden interface into account
                if self.is_hide(net):
                    continue
                # An interface unseen at the previous grab has no delta yet
                try:
                    previous = self.network_old[net]
                except KeyError:
                    continue
                total_rx = counter.bytes_recv
                total_tx = counter.bytes_sent
                delta_rx = total_rx - previous.bytes_recv
                delta_tx = total_tx - previous.bytes_sent
                netstat = {
                    'interface_name': net,
                    'time_since_update': time_since_update,
                    'cumulative_rx': total_rx,
                    'rx': delta_rx,
                    'cumulative_tx': total_tx,
                    'tx': delta_tx,
                    'cumulative_cx': total_rx + total_tx,
                    'cx': delta_rx + delta_tx
                }
                # Optional stats (only compliant with PsUtil 3.0+)
                try:
                    netstat['is_up'] = if_status[net].isup
                except (KeyError, AttributeError):
                    pass
                # Set the key
                netstat['key'] = self.get_key()
                self.stats.append(netstat)

            # Save stats to compute next bitrate
            self.network_old = counters

    elif self.input_method == 'snmp':
        # SNMP bulk command to get all network interface in one shot
        try:
            counters = self.get_stats_snmp(
                snmp_oid=snmp_oid[self.short_system_name],
                bulk=True)
        except KeyError:
            # Unknown system: fall back to the default OID table
            counters = self.get_stats_snmp(
                snmp_oid=snmp_oid['default'],
                bulk=True)

        if not hasattr(self, 'network_old'):
            # First call: remember the counters, rates need two samples
            try:
                self.network_old = counters
            except (IOError, UnboundLocalError):
                pass
        else:
            # See description in the 'local' block
            time_since_update = getTimeSinceLastUpdate('net')

            for net in counters:
                # Do not take hidden interface into account
                if self.is_hide(net):
                    continue

                # Windows: a tips is needed to convert HEX to TXT
                # http://blogs.technet.com/b/networking/archive/2009/12/18/how-to-query-the-list-of-network-interfaces-using-snmp-via-the-ifdescr-counter.aspx
                if self.short_system_name == 'windows':
                    try:
                        interface_name = str(
                            base64.b16decode(net[2:-2].upper()))
                    except TypeError:
                        interface_name = net
                else:
                    interface_name = net

                # Missing counters (new interface or schema change): skip
                try:
                    total_rx = float(counters[net]['cumulative_rx'])
                    total_tx = float(counters[net]['cumulative_tx'])
                    delta_rx = total_rx - float(
                        self.network_old[net]['cumulative_rx'])
                    delta_tx = total_tx - float(
                        self.network_old[net]['cumulative_tx'])
                except KeyError:
                    continue

                netstat = {
                    'interface_name': interface_name,
                    'time_since_update': time_since_update,
                    'cumulative_rx': total_rx,
                    'rx': delta_rx,
                    'cumulative_tx': total_tx,
                    'tx': delta_tx,
                    'cumulative_cx': total_rx + total_tx,
                    'cx': delta_rx + delta_tx
                }
                netstat['key'] = self.get_key()
                self.stats.append(netstat)

            # Save stats to compute next bitrate
            self.network_old = counters

    # Update the history list
    self.update_stats_history(self.get_key())

    # Update the view
    self.update_views()

    return self.stats