class GlancesProcesses(object): """Get processed stats using the psutil library.""" def __init__(self, cache_timeout=60): """Init the class to collect stats about processes.""" # Add internals caches because psutil do not cache all the stats # See: https://code.google.com/p/psutil/issues/detail?id=462 self.username_cache = {} self.cmdline_cache = {} # The internals caches will be cleaned each 'cache_timeout' seconds self.cache_timeout = cache_timeout self.cache_timer = Timer(self.cache_timeout) # Init the io dict # key = pid # value = [ read_bytes_old, write_bytes_old ] self.io_old = {} # Init stats self.auto_sort = True self._sort_key = 'cpu_percent' self.processlist = [] self.reset_processcount() # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption) # Default is to enable the processes stats self.disable_tag = False # Extended stats for top process is enable by default self.disable_extended_tag = False # Maximum number of processes showed in the UI (None if no limit) self._max_processes = None # Process filter is a regular expression self._filter = GlancesFilter() # Whether or not to hide kernel threads self.no_kernel_threads = False # Store maximums values in a dict # Used in the UI to highlight the maximum value self._max_values_list = ('cpu_percent', 'memory_percent') # { 'cpu_percent': 0.0, 'memory_percent': 0.0 } self._max_values = {} self.reset_max_values() def reset_processcount(self): """Reset the global process count""" self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0, 'pid_max': None} def update_processcount(self, plist): """Update the global process count from the current processes list""" # Update the maximum process ID (pid) number self.processcount['pid_max'] = self.pid_max # For each key in the processcount dict # count the number of processes with the same status for k in iterkeys(self.processcount): self.processcount[k] = len(list(filter(lambda v: v['status'] is k, plist))) # Compute thread self.processcount['thread'] = sum(i['num_threads'] for i in plist if i['num_threads'] is not None) # Compute total self.processcount['total'] = len(plist) def enable(self): """Enable process stats.""" self.disable_tag = False self.update() def disable(self): """Disable process stats.""" self.disable_tag = True def enable_extended(self): """Enable extended process stats.""" self.disable_extended_tag = False self.update() def disable_extended(self): """Disable extended process stats.""" self.disable_extended_tag = True @property def pid_max(self): """ Get the maximum PID value. On Linux, the value is read from the `/proc/sys/kernel/pid_max` file. From `man 5 proc`: The default value for this file, 32768, results in the same range of PIDs as on earlier kernels. On 32-bit platfroms, 32768 is the maximum value for pid_max. On 64-bit systems, pid_max can be set to any value up to 2^22 (PID_MAX_LIMIT, approximately 4 million). If the file is unreadable or not available for whatever reason, returns None. Some other OSes: - On FreeBSD and macOS the maximum is 99999. - On OpenBSD >= 6.0 the maximum is 99999 (was 32766). - On NetBSD the maximum is 30000. 
:returns: int or None """ if LINUX: # XXX: waiting for https://github.com/giampaolo/psutil/issues/720 try: with open('/proc/sys/kernel/pid_max', 'rb') as f: return int(f.read()) except (OSError, IOError): return None else: return None @property def max_processes(self): """Get the maximum number of processes showed in the UI.""" return self._max_processes @max_processes.setter def max_processes(self, value): """Set the maximum number of processes showed in the UI.""" self._max_processes = value @property def process_filter_input(self): """Get the process filter (given by the user).""" return self._filter.filter_input @property def process_filter(self): """Get the process filter (current apply filter).""" return self._filter.filter @process_filter.setter def process_filter(self, value): """Set the process filter.""" self._filter.filter = value @property def process_filter_key(self): """Get the process filter key.""" return self._filter.filter_key @property def process_filter_re(self): """Get the process regular expression compiled.""" return self._filter.filter_re def disable_kernel_threads(self): """Ignore kernel threads in process list.""" self.no_kernel_threads = True @property def sort_reverse(self): """Return True to sort processes in reverse 'key' order, False instead.""" if self.sort_key == 'name' or self.sort_key == 'username': return False return True def max_values(self): """Return the max values dict.""" return self._max_values def get_max_values(self, key): """Get the maximum values of the given stat (key).""" return self._max_values[key] def set_max_values(self, key, value): """Set the maximum value for a specific stat (key).""" self._max_values[key] = value def reset_max_values(self): """Reset the maximum values dict.""" self._max_values = {} for k in self._max_values_list: self._max_values[k] = 0.0 def update(self): """Update the processes stats.""" # Reset the stats self.processlist = [] self.reset_processcount() # Do not process if disable tag is set if self.disable_tag: return # Time since last update (for disk_io rate computation) time_since_update = getTimeSinceLastUpdate('process_disk') # Grab standard stats ##################### standard_attrs = ['cmdline', 'cpu_percent', 'cpu_times', 'memory_info', 'memory_percent', 'name', 'nice', 'pid', 'ppid', 'status', 'username', 'status', 'num_threads'] # io_counters availability: Linux, BSD, Windows, AIX if not MACOS and not SUNOS and not WSL: standard_attrs += ['io_counters'] # gids availability: Unix if not WINDOWS: standard_attrs += ['gids'] # and build the processes stats list (psutil>=5.3.0) self.processlist = [p.info for p in psutil.process_iter(attrs=standard_attrs, ad_value=None) # OS-related processes filter if not (BSD and p.info['name'] == 'idle') and not (WINDOWS and p.info['name'] == 'System Idle Process') and not (MACOS and p.info['name'] == 'kernel_task') and # Kernel threads filter not (self.no_kernel_threads and LINUX and p.info['gids'].real == 0) and # User filter not (self._filter.is_filtered(p.info))] # Sort the processes list by the current sort_key self.processlist = sort_stats(self.processlist, sortedby=self.sort_key, reverse=True) # Update the processcount self.update_processcount(self.processlist) # Loop over processes and add metadata first = True for proc in self.processlist: # Get extended stats, only for top processes (see issue #403). 
if first and not self.disable_extended_tag: # - cpu_affinity (Linux, Windows, FreeBSD) # - ionice (Linux and Windows > Vista) # - num_ctx_switches (not available on Illumos/Solaris) # - num_fds (Unix-like) # - num_handles (Windows) # - memory_maps (only swap, Linux) # https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/ # - connections (TCP and UDP) extended = {} try: top_process = psutil.Process(proc['pid']) extended_stats = ['cpu_affinity', 'ionice', 'num_ctx_switches'] if LINUX: # num_fds only avalable on Unix system (see issue #1351) extended_stats += ['num_fds'] if WINDOWS: extended_stats += ['num_handles'] # Get the extended stats extended = top_process.as_dict(attrs=extended_stats, ad_value=None) if LINUX: try: extended['memory_swap'] = sum([v.swap for v in top_process.memory_maps()]) except psutil.NoSuchProcess: pass except (psutil.AccessDenied, NotImplementedError): # NotImplementedError: /proc/${PID}/smaps file doesn't exist # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option # is not enabled (see psutil #533/glances #413). extended['memory_swap'] = None try: extended['tcp'] = len(top_process.connections(kind="tcp")) extended['udp'] = len(top_process.connections(kind="udp")) except (psutil.AccessDenied, psutil.NoSuchProcess): # Manage issue1283 (psutil.AccessDenied) extended['tcp'] = None extended['udp'] = None except (psutil.NoSuchProcess, ValueError, AttributeError) as e: logger.error('Can not grab extended stats ({})'.format(e)) extended['extended_stats'] = False else: logger.debug('Grab extended stats for process {}'.format(proc['pid'])) extended['extended_stats'] = True proc.update(extended) first = False # /End of extended stats # Time since last update (for disk_io rate computation) proc['time_since_update'] = time_since_update # Process status (only keep the first char) proc['status'] = str(proc['status'])[:1].upper() # Process IO # procstat['io_counters'] is a list: # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag] # If io_tag = 0 > Access denied or first time (display "?") # If io_tag = 1 > No access denied (display the IO rate) if 'io_counters' in proc and proc['io_counters'] is not None: io_new = [proc['io_counters'].read_bytes, proc['io_counters'].write_bytes] # For IO rate computation # Append saved IO r/w bytes try: proc['io_counters'] = io_new + self.io_old[proc['pid']] io_tag = 1 except KeyError: proc['io_counters'] = io_new + [0, 0] io_tag = 0 # then save the IO r/w bytes self.io_old[proc['pid']] = io_new else: proc['io_counters'] = [0, 0] + [0, 0] io_tag = 0 # Append the IO tag (for display) proc['io_counters'] += [io_tag] # Compute the maximum value for keys in self._max_values_list: CPU, MEM # Usefull to highlight the processes with maximum values for k in self._max_values_list: values_list = [i[k] for i in self.processlist if i[k] is not None] if values_list != []: self.set_max_values(k, max(values_list)) def getcount(self): """Get the number of processes.""" return self.processcount def getlist(self, sortedby=None): """Get the processlist.""" return self.processlist @property def sort_key(self): """Get the current sort key.""" return self._sort_key @sort_key.setter def sort_key(self, key): """Set the current sort key.""" self._sort_key = key
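# --- Illustrative sketch ------------------------------------------------------
# A minimal, standalone example of the psutil.process_iter(attrs=..., ad_value=None)
# pattern used by update() above. It assumes psutil >= 5.3.0; the shortened attrs
# list is hypothetical, and the name filter mirrors the OS-related filter above.

import psutil

attrs = ['pid', 'name', 'status', 'cpu_percent', 'memory_percent', 'num_threads']

# ad_value=None: attributes that raise AccessDenied are returned as None instead
# of aborting the whole iteration
processlist = [p.info for p in psutil.process_iter(attrs=attrs, ad_value=None)
               # Drop the per-OS "idle" pseudo processes, as in update() above
               if p.info['name'] not in ('idle', 'System Idle Process', 'kernel_task')]

# Sort by CPU usage, pushing processes whose cpu_percent is None to the end
processlist.sort(key=lambda p: p['cpu_percent'] if p['cpu_percent'] is not None else -1.0,
                 reverse=True)

for p in processlist[:5]:
    print(p['pid'], p['name'], p['cpu_percent'])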
class GlancesProcesses(object): """Get processed stats using the psutil library.""" def __init__(self, cache_timeout=60): """Init the class to collect stats about processes.""" # Add internals caches because PSUtil do not cache all the stats # See: https://code.google.com/p/psutil/issues/detail?id=462 self.username_cache = {} self.cmdline_cache = {} # The internals caches will be cleaned each 'cache_timeout' seconds self.cache_timeout = cache_timeout self.cache_timer = Timer(self.cache_timeout) # Init the io dict # key = pid # value = [ read_bytes_old, write_bytes_old ] self.io_old = {} # Wether or not to enable process tree self._enable_tree = False self.process_tree = None # Init stats self.auto_sort = True self._sort_key = 'cpu_percent' self.allprocesslist = [] self.processlist = [] self.reset_processcount() # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption) # Default is to enable the processes stats self.disable_tag = False # Extended stats for top process is enable by default self.disable_extended_tag = False # Maximum number of processes showed in the UI (None if no limit) self._max_processes = None # Process filter is a regular expression self._filter = GlancesFilter() # Whether or not to hide kernel threads self.no_kernel_threads = False # Store maximums values in a dict # Used in the UI to highlight the maximum value self._max_values_list = ('cpu_percent', 'memory_percent') # { 'cpu_percent': 0.0, 'memory_percent': 0.0 } self._max_values = {} self.reset_max_values() def reset_processcount(self): self.processcount = { 'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0, 'pid_max': None } def enable(self): """Enable process stats.""" self.disable_tag = False self.update() def disable(self): """Disable process stats.""" self.disable_tag = True def enable_extended(self): """Enable extended process stats.""" self.disable_extended_tag = False self.update() def disable_extended(self): """Disable extended process stats.""" self.disable_extended_tag = True @property def pid_max(self): """ Get the maximum PID value. On Linux, the value is read from the `/proc/sys/kernel/pid_max` file. From `man 5 proc`: The default value for this file, 32768, results in the same range of PIDs as on earlier kernels. On 32-bit platfroms, 32768 is the maximum value for pid_max. On 64-bit systems, pid_max can be set to any value up to 2^22 (PID_MAX_LIMIT, approximately 4 million). If the file is unreadable or not available for whatever reason, returns None. Some other OSes: - On FreeBSD and macOS the maximum is 99999. - On OpenBSD >= 6.0 the maximum is 99999 (was 32766). - On NetBSD the maximum is 30000. 
:returns: int or None """ if LINUX: # XXX: waiting for https://github.com/giampaolo/psutil/issues/720 try: with open('/proc/sys/kernel/pid_max', 'rb') as f: return int(f.read()) except (OSError, IOError): return None @property def max_processes(self): """Get the maximum number of processes showed in the UI.""" return self._max_processes @max_processes.setter def max_processes(self, value): """Set the maximum number of processes showed in the UI.""" self._max_processes = value @property def process_filter_input(self): """Get the process filter (given by the user).""" return self._filter.filter_input @property def process_filter(self): """Get the process filter (current apply filter).""" return self._filter.filter @process_filter.setter def process_filter(self, value): """Set the process filter.""" self._filter.filter = value @property def process_filter_key(self): """Get the process filter key.""" return self._filter.filter_key @property def process_filter_re(self): """Get the process regular expression compiled.""" return self._filter.filter_re def disable_kernel_threads(self): """Ignore kernel threads in process list.""" self.no_kernel_threads = True def enable_tree(self): """Enable process tree.""" self._enable_tree = True def is_tree_enabled(self): """Return True if process tree is enabled, False instead.""" return self._enable_tree @property def sort_reverse(self): """Return True to sort processes in reverse 'key' order, False instead.""" if self.sort_key == 'name' or self.sort_key == 'username': return False return True def max_values(self): """Return the max values dict.""" return self._max_values def get_max_values(self, key): """Get the maximum values of the given stat (key).""" return self._max_values[key] def set_max_values(self, key, value): """Set the maximum value for a specific stat (key).""" self._max_values[key] = value def reset_max_values(self): """Reset the maximum values dict.""" self._max_values = {} for k in self._max_values_list: self._max_values[k] = 0.0 def __get_mandatory_stats(self, proc, procstat): """ Get mandatory_stats: for all processes. Needed for the sorting/filter step. 
Stats grabbed inside this method: * 'name', 'cpu_times', 'status', 'ppid' * 'username', 'cpu_percent', 'memory_percent' """ procstat['mandatory_stats'] = True # Name, cpu_times, status and ppid stats are in the same /proc file # Optimisation fir issue #958 try: procstat.update( proc.as_dict(attrs=['name', 'cpu_times', 'status', 'ppid'], ad_value='')) except psutil.NoSuchProcess: # Try/catch for issue #432 (process no longer exist) return None else: procstat['status'] = str(procstat['status'])[:1].upper() try: procstat.update( proc.as_dict( attrs=['username', 'cpu_percent', 'memory_percent'], ad_value='')) except psutil.NoSuchProcess: # Try/catch for issue #432 (process no longer exist) return None if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '': # Do not display process if we cannot get the basic # cpu_percent or memory_percent stats return None # Compute the maximum value for cpu_percent and memory_percent for k in self._max_values_list: if procstat[k] > self.get_max_values(k): self.set_max_values(k, procstat[k]) # Process command line (cached with internal cache) if procstat['pid'] not in self.cmdline_cache: # Patch for issue #391 try: self.cmdline_cache[procstat['pid']] = proc.cmdline() except (AttributeError, UnicodeDecodeError, psutil.AccessDenied, psutil.NoSuchProcess, psutil.WindowsError): self.cmdline_cache[procstat['pid']] = "" procstat['cmdline'] = self.cmdline_cache[procstat['pid']] # Process IO # procstat['io_counters'] is a list: # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag] # If io_tag = 0 > Access denied (display "?") # If io_tag = 1 > No access denied (display the IO rate) # Availability: all platforms except macOS and Illumos/Solaris try: # Get the process IO counters proc_io = proc.io_counters() io_new = [proc_io.read_bytes, proc_io.write_bytes] except (psutil.AccessDenied, psutil.NoSuchProcess, NotImplementedError): # Access denied to process IO (no root account) # NoSuchProcess (process die between first and second grab) # Put 0 in all values (for sort) and io_tag = 0 (for display) procstat['io_counters'] = [0, 0] + [0, 0] io_tag = 0 except AttributeError: return procstat else: # For IO rate computation # Append saved IO r/w bytes try: procstat['io_counters'] = io_new + self.io_old[procstat['pid']] except KeyError: procstat['io_counters'] = io_new + [0, 0] # then save the IO r/w bytes self.io_old[procstat['pid']] = io_new io_tag = 1 # Append the IO tag (for display) procstat['io_counters'] += [io_tag] return procstat def __get_standard_stats(self, proc, procstat): """ Get standard_stats: only for displayed processes. Stats grabbed inside this method: * nice and memory_info """ procstat['standard_stats'] = True # Process nice and memory_info (issue #926) try: procstat.update(proc.as_dict(attrs=['nice', 'memory_info'])) except psutil.NoSuchProcess: pass return procstat def __get_extended_stats(self, proc, procstat): """ Get extended stats, only for top processes (see issue #403). 
- cpu_affinity (Linux, Windows, FreeBSD) - ionice (Linux and Windows > Vista) - memory_full_info (Linux) - num_ctx_switches (not available on Illumos/Solaris) - num_fds (Unix-like) - num_handles (Windows) - num_threads (not available on *BSD) - memory_maps (only swap, Linux) https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/ - connections (TCP and UDP) """ procstat['extended_stats'] = True for stat in [ 'cpu_affinity', 'ionice', 'memory_full_info', 'num_ctx_switches', 'num_fds', 'num_handles', 'num_threads' ]: try: procstat.update(proc.as_dict(attrs=[stat])) except psutil.NoSuchProcess: pass # XXX: psutil>=4.3.1 raises ValueError while <4.3.1 raises AttributeError except (ValueError, AttributeError): procstat[stat] = None if LINUX: try: procstat['memory_swap'] = sum( [v.swap for v in proc.memory_maps()]) except psutil.NoSuchProcess: pass except (psutil.AccessDenied, TypeError, NotImplementedError): # NotImplementedError: /proc/${PID}/smaps file doesn't exist # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option # is not enabled (see psutil #533/glances #413). # XXX: Remove TypeError once we'll drop psutil < 3.0.0. procstat['memory_swap'] = None try: procstat['tcp'] = len(proc.connections(kind="tcp")) procstat['udp'] = len(proc.connections(kind="udp")) except psutil.AccessDenied: procstat['tcp'] = None procstat['udp'] = None return procstat def __get_process_stats(self, proc, mandatory_stats=True, standard_stats=True, extended_stats=False): """Get stats of a running processes.""" # Process ID (always) procstat = proc.as_dict(attrs=['pid']) if mandatory_stats: procstat = self.__get_mandatory_stats(proc, procstat) if procstat is not None and standard_stats: procstat = self.__get_standard_stats(proc, procstat) if procstat is not None and extended_stats and not self.disable_extended_tag: procstat = self.__get_extended_stats(proc, procstat) return procstat def update(self): """Update the processes stats.""" # Reset the stats self.processlist = [] self.reset_processcount() # Do not process if disable tag is set if self.disable_tag: return # Get the time since last update time_since_update = getTimeSinceLastUpdate('process_disk') # Reset the max dict self.reset_max_values() # Update the maximum process ID (pid) number self.processcount['pid_max'] = self.pid_max # Build an internal dict with only mandatories stats (sort keys) processdict = {} excluded_processes = set() for proc in psutil.process_iter(): # Ignore kernel threads if needed if self.no_kernel_threads and not WINDOWS and is_kernel_thread( proc): continue # If self.max_processes is None: Only retrieve mandatory stats # Else: retrieve mandatory and standard stats s = self.__get_process_stats( proc, mandatory_stats=True, standard_stats=self.max_processes is None) # Check if s is note None (issue #879) # ignore the 'idle' process on Windows and *BSD # ignore the 'kernel_task' process on macOS # waiting for upstream patch from psutil if (s is None or BSD and s['name'] == 'idle' or WINDOWS and s['name'] == 'System Idle Process' or MACOS and s['name'] == 'kernel_task'): continue # Continue to the next process if it has to be filtered if self._filter.is_filtered(s): excluded_processes.add(proc) continue # Ok add the process to the list processdict[proc] = s # Update processcount (global statistics) try: self.processcount[str(proc.status())] += 1 except KeyError: # Key did not exist, create it try: self.processcount[str(proc.status())] = 1 except psutil.NoSuchProcess: pass except psutil.NoSuchProcess: pass else: 
self.processcount['total'] += 1 # Update thread number (global statistics) try: self.processcount['thread'] += proc.num_threads() except Exception: pass if self._enable_tree: self.process_tree = ProcessTreeNode.build_tree( processdict, self.sort_key, self.sort_reverse, self.no_kernel_threads, excluded_processes) for i, node in enumerate(self.process_tree): # Only retreive stats for visible processes (max_processes) if self.max_processes is not None and i >= self.max_processes: break # add standard stats new_stats = self.__get_process_stats(node.process, mandatory_stats=False, standard_stats=True, extended_stats=False) if new_stats is not None: node.stats.update(new_stats) # Add a specific time_since_update stats for bitrate node.stats['time_since_update'] = time_since_update else: # Process optimization # Only retreive stats for visible processes (max_processes) if self.max_processes is not None: # Sort the internal dict and cut the top N (Return a list of tuple) # tuple=key (proc), dict (returned by __get_process_stats) try: processiter = sorted(iteritems(processdict), key=lambda x: x[1][self.sort_key], reverse=self.sort_reverse) except (KeyError, TypeError) as e: logger.error("Cannot sort process list by {}: {}".format( self.sort_key, e)) logger.error('{}'.format(listitems(processdict)[0])) # Fallback to all process (issue #423) processloop = iteritems(processdict) first = False else: processloop = processiter[0:self.max_processes] first = True else: # Get all processes stats processloop = iteritems(processdict) first = False for i in processloop: # Already existing mandatory stats procstat = i[1] if self.max_processes is not None: # Update with standard stats # and extended stats but only for TOP (first) process s = self.__get_process_stats(i[0], mandatory_stats=False, standard_stats=True, extended_stats=first) if s is None: continue procstat.update(s) # Add a specific time_since_update stats for bitrate procstat['time_since_update'] = time_since_update # Update process list self.processlist.append(procstat) # Next... first = False # Build the all processes list used by the AMPs self.allprocesslist = [p for p in itervalues(processdict)] # Clean internals caches if timeout is reached if self.cache_timer.finished(): self.username_cache = {} self.cmdline_cache = {} # Restart the timer self.cache_timer.reset() def getcount(self): """Get the number of processes.""" return self.processcount def getalllist(self): """Get the allprocesslist.""" return self.allprocesslist def getlist(self, sortedby=None): """Get the processlist.""" return self.processlist def gettree(self): """Get the process tree.""" return self.process_tree @property def sort_key(self): """Get the current sort key.""" return self._sort_key @sort_key.setter def sort_key(self, key): """Set the current sort key.""" self._sort_key = key
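# --- Illustrative sketch ------------------------------------------------------
# Standalone sketch of the io_counters bookkeeping used by __get_mandatory_stats()
# above: each refresh stores the raw counters and appends the values saved at the
# previous refresh, so the UI can later compute a per-interval rate. The helper
# name update_io and the module-level io_old dict are illustrative only.

import time

import psutil

io_old = {}  # key = pid, value = [read_bytes_old, write_bytes_old]

def update_io(proc):
    """Return [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]."""
    try:
        counters = proc.io_counters()
        io_new = [counters.read_bytes, counters.write_bytes]
    except (psutil.AccessDenied, psutil.NoSuchProcess,
            NotImplementedError, AttributeError):
        # io_tag == 0: access denied or platform not supported, the UI shows "?"
        return [0, 0, 0, 0, 0]
    # Append the values saved at the previous call (zeroes the first time)
    stats = io_new + io_old.get(proc.pid, [0, 0])
    io_old[proc.pid] = io_new
    return stats + [1]  # io_tag == 1: the IO rate can be displayed

if __name__ == '__main__':
    me = psutil.Process()
    update_io(me)                      # first grab: *_old values are zeroes
    time.sleep(1)
    rd, wr, rd_old, wr_old, tag = update_io(me)
    if tag:
        print('bytes read over the interval: {}'.format(rd - rd_old))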
class GlancesProcesses(object): """Get processed stats using the psutil library.""" def __init__(self, cache_timeout=60): """Init the class to collect stats about processes.""" # Add internals caches because PSUtil do not cache all the stats # See: https://code.google.com/p/psutil/issues/detail?id=462 self.username_cache = {} self.cmdline_cache = {} # The internals caches will be cleaned each 'cache_timeout' seconds self.cache_timeout = cache_timeout self.cache_timer = Timer(self.cache_timeout) # Init the io dict # key = pid # value = [ read_bytes_old, write_bytes_old ] self.io_old = {} # Wether or not to enable process tree self._enable_tree = False self.process_tree = None # Init stats self.auto_sort = True self._sort_key = 'cpu_percent' self.allprocesslist = [] self.processlist = [] self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0} # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption) # Default is to enable the processes stats self.disable_tag = False # Extended stats for top process is enable by default self.disable_extended_tag = False # Maximum number of processes showed in the UI (None if no limit) self._max_processes = None # Process filter is a regular expression self._filter = GlancesFilter() # Whether or not to hide kernel threads self.no_kernel_threads = False def enable(self): """Enable process stats.""" self.disable_tag = False self.update() def disable(self): """Disable process stats.""" self.disable_tag = True def enable_extended(self): """Enable extended process stats.""" self.disable_extended_tag = False self.update() def disable_extended(self): """Disable extended process stats.""" self.disable_extended_tag = True @property def max_processes(self): """Get the maximum number of processes showed in the UI.""" return self._max_processes @max_processes.setter def max_processes(self, value): """Set the maximum number of processes showed in the UI.""" self._max_processes = value @property def process_filter_input(self): """Get the process filter (given by the user).""" return self._filter.filter_input @property def process_filter(self): """Get the process filter (current apply filter).""" return self._filter.filter @process_filter.setter def process_filter(self, value): """Set the process filter.""" self._filter.filter = value @property def process_filter_key(self): """Get the process filter key.""" return self._filter.filter_key @property def process_filter_re(self): """Get the process regular expression compiled.""" return self._filter.filter_re def disable_kernel_threads(self): """Ignore kernel threads in process list.""" self.no_kernel_threads = True def enable_tree(self): """Enable process tree.""" self._enable_tree = True def is_tree_enabled(self): """Return True if process tree is enabled, False instead.""" return self._enable_tree @property def sort_reverse(self): """Return True to sort processes in reverse 'key' order, False instead.""" if self.sort_key == 'name' or self.sort_key == 'username': return False return True def __get_mandatory_stats(self, proc, procstat): """ Get mandatory_stats: need for the sorting/filter step. 
=> cpu_percent, memory_percent, io_counters, name, cmdline """ procstat['mandatory_stats'] = True # Process CPU, MEM percent and name try: procstat.update(proc.as_dict( attrs=['username', 'cpu_percent', 'memory_percent', 'name', 'cpu_times'], ad_value='')) except psutil.NoSuchProcess: # Try/catch for issue #432 return None if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '': # Do not display process if we cannot get the basic # cpu_percent or memory_percent stats return None # Process command line (cached with internal cache) try: self.cmdline_cache[procstat['pid']] except KeyError: # Patch for issue #391 try: self.cmdline_cache[procstat['pid']] = proc.cmdline() except (AttributeError, UnicodeDecodeError, psutil.AccessDenied, psutil.NoSuchProcess): self.cmdline_cache[procstat['pid']] = "" procstat['cmdline'] = self.cmdline_cache[procstat['pid']] # Process IO # procstat['io_counters'] is a list: # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag] # If io_tag = 0 > Access denied (display "?") # If io_tag = 1 > No access denied (display the IO rate) # Note Disk IO stat not available on Mac OS if not OSX: try: # Get the process IO counters proc_io = proc.io_counters() io_new = [proc_io.read_bytes, proc_io.write_bytes] except (psutil.AccessDenied, psutil.NoSuchProcess, NotImplementedError): # Access denied to process IO (no root account) # NoSuchProcess (process die between first and second grab) # Put 0 in all values (for sort) and io_tag = 0 (for # display) procstat['io_counters'] = [0, 0] + [0, 0] io_tag = 0 else: # For IO rate computation # Append saved IO r/w bytes try: procstat['io_counters'] = io_new + \ self.io_old[procstat['pid']] except KeyError: procstat['io_counters'] = io_new + [0, 0] # then save the IO r/w bytes self.io_old[procstat['pid']] = io_new io_tag = 1 # Append the IO tag (for display) procstat['io_counters'] += [io_tag] return procstat def __get_standard_stats(self, proc, procstat): """ Get standard_stats: for all the displayed processes. => username, status, memory_info, cpu_times """ procstat['standard_stats'] = True # Process username (cached with internal cache) try: self.username_cache[procstat['pid']] except KeyError: try: self.username_cache[procstat['pid']] = proc.username() except psutil.NoSuchProcess: self.username_cache[procstat['pid']] = "?" except (KeyError, psutil.AccessDenied): try: self.username_cache[procstat['pid']] = proc.uids().real except (KeyError, AttributeError, psutil.AccessDenied): self.username_cache[procstat['pid']] = "?" procstat['username'] = self.username_cache[procstat['pid']] # Process status, nice, memory_info and cpu_times try: procstat.update( proc.as_dict(attrs=['status', 'nice', 'memory_info', 'cpu_times'])) except psutil.NoSuchProcess: pass else: procstat['status'] = str(procstat['status'])[:1].upper() return procstat def __get_extended_stats(self, proc, procstat): """ Get extended_stats: only for top processes (see issue #403). => connections (UDP/TCP), memory_swap... 
""" procstat['extended_stats'] = True # CPU affinity (Windows and Linux only) try: procstat.update(proc.as_dict(attrs=['cpu_affinity'])) except psutil.NoSuchProcess: pass except AttributeError: procstat['cpu_affinity'] = None # Memory extended try: procstat.update(proc.as_dict(attrs=['memory_full_info'])) except psutil.NoSuchProcess: pass except AttributeError: # Fallback to standard memory_info stats try: procstat.update(proc.as_dict(attrs=['memory_info'])) except psutil.NoSuchProcess: pass except AttributeError: procstat['memory_info'] = None # Number of context switch try: procstat.update(proc.as_dict(attrs=['num_ctx_switches'])) except psutil.NoSuchProcess: pass except AttributeError: procstat['num_ctx_switches'] = None # Number of file descriptors (Unix only) try: procstat.update(proc.as_dict(attrs=['num_fds'])) except psutil.NoSuchProcess: pass except AttributeError: procstat['num_fds'] = None # Threads number try: procstat.update(proc.as_dict(attrs=['num_threads'])) except psutil.NoSuchProcess: pass except AttributeError: procstat['num_threads'] = None # Number of handles (Windows only) if WINDOWS: try: procstat.update(proc.as_dict(attrs=['num_handles'])) except psutil.NoSuchProcess: pass else: procstat['num_handles'] = None # SWAP memory (Only on Linux based OS) # http://www.cyberciti.biz/faq/linux-which-process-is-using-swap/ if LINUX: try: procstat['memory_swap'] = sum( [v.swap for v in proc.memory_maps()]) except psutil.NoSuchProcess: pass except psutil.AccessDenied: procstat['memory_swap'] = None except Exception: # Add a dirty except to handle the PsUtil issue #413 procstat['memory_swap'] = None # Process network connections (TCP and UDP) try: procstat['tcp'] = len(proc.connections(kind="tcp")) procstat['udp'] = len(proc.connections(kind="udp")) except Exception: procstat['tcp'] = None procstat['udp'] = None # IO Nice # http://pythonhosted.org/psutil/#psutil.Process.ionice if LINUX or WINDOWS: try: procstat.update(proc.as_dict(attrs=['ionice'])) except psutil.NoSuchProcess: pass else: procstat['ionice'] = None return procstat def __get_process_stats(self, proc, mandatory_stats=True, standard_stats=True, extended_stats=False): """Get stats of running processes.""" # Process ID (always) procstat = proc.as_dict(attrs=['pid']) if mandatory_stats: procstat = self.__get_mandatory_stats(proc, procstat) if procstat is not None and standard_stats: procstat = self.__get_standard_stats(proc, procstat) if procstat is not None and extended_stats and not self.disable_extended_tag: procstat = self.__get_extended_stats(proc, procstat) return procstat def update(self): """Update the processes stats.""" # Reset the stats self.processlist = [] self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0} # Do not process if disable tag is set if self.disable_tag: return # Get the time since last update time_since_update = getTimeSinceLastUpdate('process_disk') # Build an internal dict with only mandatories stats (sort keys) processdict = {} excluded_processes = set() for proc in psutil.process_iter(): # Ignore kernel threads if needed if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc): continue # If self.max_processes is None: Only retrieve mandatory stats # Else: retrieve mandatory and standard stats s = self.__get_process_stats(proc, mandatory_stats=True, standard_stats=self.max_processes is None) # Check if s is note None (issue #879) # ignore the 'idle' process on Windows and *BSD # ignore the 'kernel_task' process on OS X # waiting for upstream patch from 
psutil if (s is None or BSD and s['name'] == 'idle' or WINDOWS and s['name'] == 'System Idle Process' or OSX and s['name'] == 'kernel_task'): continue # Continue to the next process if it has to be filtered if self._filter.is_filtered(s): excluded_processes.add(proc) continue # Ok add the process to the list processdict[proc] = s # Update processcount (global statistics) try: self.processcount[str(proc.status())] += 1 except KeyError: # Key did not exist, create it try: self.processcount[str(proc.status())] = 1 except psutil.NoSuchProcess: pass except psutil.NoSuchProcess: pass else: self.processcount['total'] += 1 # Update thread number (global statistics) try: self.processcount['thread'] += proc.num_threads() except Exception: pass if self._enable_tree: self.process_tree = ProcessTreeNode.build_tree(processdict, self.sort_key, self.sort_reverse, self.no_kernel_threads, excluded_processes) for i, node in enumerate(self.process_tree): # Only retreive stats for visible processes (max_processes) if self.max_processes is not None and i >= self.max_processes: break # add standard stats new_stats = self.__get_process_stats(node.process, mandatory_stats=False, standard_stats=True, extended_stats=False) if new_stats is not None: node.stats.update(new_stats) # Add a specific time_since_update stats for bitrate node.stats['time_since_update'] = time_since_update else: # Process optimization # Only retreive stats for visible processes (max_processes) if self.max_processes is not None: # Sort the internal dict and cut the top N (Return a list of tuple) # tuple=key (proc), dict (returned by __get_process_stats) try: processiter = sorted(iteritems(processdict), key=lambda x: x[1][self.sort_key], reverse=self.sort_reverse) except (KeyError, TypeError) as e: logger.error("Cannot sort process list by {}: {}".format(self.sort_key, e)) logger.error('{}'.format(listitems(processdict)[0])) # Fallback to all process (issue #423) processloop = iteritems(processdict) first = False else: processloop = processiter[0:self.max_processes] first = True else: # Get all processes stats processloop = iteritems(processdict) first = False for i in processloop: # Already existing mandatory stats procstat = i[1] if self.max_processes is not None: # Update with standard stats # and extended stats but only for TOP (first) process s = self.__get_process_stats(i[0], mandatory_stats=False, standard_stats=True, extended_stats=first) if s is None: continue procstat.update(s) # Add a specific time_since_update stats for bitrate procstat['time_since_update'] = time_since_update # Update process list self.processlist.append(procstat) # Next... first = False # Build the all processes list used by the AMPs self.allprocesslist = [p for p in itervalues(processdict)] # Clean internals caches if timeout is reached if self.cache_timer.finished(): self.username_cache = {} self.cmdline_cache = {} # Restart the timer self.cache_timer.reset() def getcount(self): """Get the number of processes.""" return self.processcount def getalllist(self): """Get the allprocesslist.""" return self.allprocesslist def getlist(self, sortedby=None): """Get the processlist.""" return self.processlist def gettree(self): """Get the process tree.""" return self.process_tree @property def sort_key(self): """Get the current sort key.""" return self._sort_key @sort_key.setter def sort_key(self, key): """Set the current sort key.""" self._sort_key = key
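# --- Illustrative sketch ------------------------------------------------------
# Standalone sketch of the per-status counting done in update() above:
# psutil.Process.status() returns plain strings such as 'running' or 'sleeping',
# which are used directly as keys of the processcount dict. The dict layout
# mirrors the one above; the script itself is only an example.

import psutil

processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

for proc in psutil.process_iter():
    try:
        status = str(proc.status())
        nb_threads = proc.num_threads()
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        # The process died (or became unreadable) between iteration and grab
        continue
    # Create the status key on the fly if it does not exist yet
    processcount[status] = processcount.get(status, 0) + 1
    processcount['total'] += 1
    processcount['thread'] += nb_threads

print(processcount)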
def __init__(self, cache_timeout=60):
    """Init the class to collect stats about processes."""
    # Add internal caches because psutil does not cache all the stats
    # See: https://code.google.com/p/psutil/issues/detail?id=462
    self.username_cache = {}
    self.cmdline_cache = {}

    # The internal caches will be cleaned every 'cache_timeout' seconds
    self.cache_timeout = cache_timeout
    self.cache_timer = Timer(self.cache_timeout)

    # Init the io dict
    # key = pid
    # value = [ read_bytes_old, write_bytes_old ]
    self.io_old = {}

    # Init stats
    self.auto_sort = None
    self._sort_key = None
    # The default processes sort key is 'auto'
    # It can be overridden from the configuration file (issue #1536) => see glances_processlist.py init
    self.set_sort_key('auto', auto=True)
    self.processlist = []
    self.reset_processcount()

    # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
    # Default is to enable the processes stats
    self.disable_tag = False

    # Extended stats for the top process are enabled by default
    self.disable_extended_tag = False

    # Test if the system can grab io_counters
    try:
        p = psutil.Process()
        p.io_counters()
    except Exception as e:
        logger.warning('PsUtil can not grab processes io_counters ({})'.format(e))
        self.disable_io_counters = True
    else:
        logger.debug('PsUtil can grab processes io_counters')
        self.disable_io_counters = False

    # Test if the system can grab gids
    try:
        p = psutil.Process()
        p.gids()
    except Exception as e:
        logger.warning('PsUtil can not grab processes gids ({})'.format(e))
        self.disable_gids = True
    else:
        logger.debug('PsUtil can grab processes gids')
        self.disable_gids = False

    # Maximum number of processes shown in the UI (None if no limit)
    self._max_processes = None

    # The process filter is a regular expression
    self._filter = GlancesFilter()

    # Whether or not to hide kernel threads
    self.no_kernel_threads = False

    # Store maximum values in a dict
    # Used in the UI to highlight the maximum value
    self._max_values_list = ('cpu_percent', 'memory_percent')
    # { 'cpu_percent': 0.0, 'memory_percent': 0.0 }
    self._max_values = {}
    self.reset_max_values()
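# --- Illustrative sketch ------------------------------------------------------
# Standalone sketch of the capability probe used in __init__ above: call the
# psutil method once at startup and remember whether the platform supports it,
# instead of handling the error on every refresh. The probe() helper and the
# attrs list built from it are illustrative, not part of the Glances API.

import psutil

def probe(method_name):
    """Return True if psutil supports `method_name` for the current process."""
    try:
        getattr(psutil.Process(), method_name)()
    except Exception:
        # AttributeError (not implemented on this OS), AccessDenied, ...
        return False
    return True

disable_io_counters = not probe('io_counters')
disable_gids = not probe('gids')

attrs = ['pid', 'name', 'status', 'cpu_percent', 'memory_percent']
if not disable_io_counters:
    attrs += ['io_counters']
if not disable_gids:
    attrs += ['gids']

print('process attributes grabbed on this platform: {}'.format(attrs))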
class GlancesProcesses(object): """Get processed stats using the psutil library.""" def __init__(self, cache_timeout=60): """Init the class to collect stats about processes.""" # Add internals caches because PSUtil do not cache all the stats # See: https://code.google.com/p/psutil/issues/detail?id=462 self.username_cache = {} self.cmdline_cache = {} # The internals caches will be cleaned each 'cache_timeout' seconds self.cache_timeout = cache_timeout self.cache_timer = Timer(self.cache_timeout) # Init the io dict # key = pid # value = [ read_bytes_old, write_bytes_old ] self.io_old = {} # Wether or not to enable process tree self._enable_tree = False self.process_tree = None # Init stats self.auto_sort = True self._sort_key = 'cpu_percent' self.allprocesslist = [] self.processlist = [] self.reset_processcount() # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption) # Default is to enable the processes stats self.disable_tag = False # Extended stats for top process is enable by default self.disable_extended_tag = False # Maximum number of processes showed in the UI (None if no limit) self._max_processes = None # Process filter is a regular expression self._filter = GlancesFilter() # Whether or not to hide kernel threads self.no_kernel_threads = False # Store maximums values in a dict # Used in the UI to highlight the maximum value self._max_values_list = ('cpu_percent', 'memory_percent') # { 'cpu_percent': 0.0, 'memory_percent': 0.0 } self._max_values = {} self.reset_max_values() def reset_processcount(self): self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0, 'pid_max': None} def enable(self): """Enable process stats.""" self.disable_tag = False self.update() def disable(self): """Disable process stats.""" self.disable_tag = True def enable_extended(self): """Enable extended process stats.""" self.disable_extended_tag = False self.update() def disable_extended(self): """Disable extended process stats.""" self.disable_extended_tag = True @property def pid_max(self): """ Get the maximum PID value. On Linux, the value is read from the `/proc/sys/kernel/pid_max` file. From `man 5 proc`: The default value for this file, 32768, results in the same range of PIDs as on earlier kernels. On 32-bit platfroms, 32768 is the maximum value for pid_max. On 64-bit systems, pid_max can be set to any value up to 2^22 (PID_MAX_LIMIT, approximately 4 million). If the file is unreadable or not available for whatever reason, returns None. Some other OSes: - On FreeBSD and macOS the maximum is 99999. - On OpenBSD >= 6.0 the maximum is 99999 (was 32766). - On NetBSD the maximum is 30000. 
:returns: int or None """ if LINUX: # XXX: waiting for https://github.com/giampaolo/psutil/issues/720 try: with open('/proc/sys/kernel/pid_max', 'rb') as f: return int(f.read()) except (OSError, IOError): return None @property def max_processes(self): """Get the maximum number of processes showed in the UI.""" return self._max_processes @max_processes.setter def max_processes(self, value): """Set the maximum number of processes showed in the UI.""" self._max_processes = value @property def process_filter_input(self): """Get the process filter (given by the user).""" return self._filter.filter_input @property def process_filter(self): """Get the process filter (current apply filter).""" return self._filter.filter @process_filter.setter def process_filter(self, value): """Set the process filter.""" self._filter.filter = value @property def process_filter_key(self): """Get the process filter key.""" return self._filter.filter_key @property def process_filter_re(self): """Get the process regular expression compiled.""" return self._filter.filter_re def disable_kernel_threads(self): """Ignore kernel threads in process list.""" self.no_kernel_threads = True def enable_tree(self): """Enable process tree.""" self._enable_tree = True def is_tree_enabled(self): """Return True if process tree is enabled, False instead.""" return self._enable_tree @property def sort_reverse(self): """Return True to sort processes in reverse 'key' order, False instead.""" if self.sort_key == 'name' or self.sort_key == 'username': return False return True def max_values(self): """Return the max values dict.""" return self._max_values def get_max_values(self, key): """Get the maximum values of the given stat (key).""" return self._max_values[key] def set_max_values(self, key, value): """Set the maximum value for a specific stat (key).""" self._max_values[key] = value def reset_max_values(self): """Reset the maximum values dict.""" self._max_values = {} for k in self._max_values_list: self._max_values[k] = 0.0 def __get_mandatory_stats(self, proc, procstat): """ Get mandatory_stats: for all processes. Needed for the sorting/filter step. 
Stats grabbed inside this method: * 'name', 'cpu_times', 'status', 'ppid' * 'username', 'cpu_percent', 'memory_percent' """ procstat['mandatory_stats'] = True # Name, cpu_times, status and ppid stats are in the same /proc file # Optimisation fir issue #958 try: procstat.update(proc.as_dict( attrs=['name', 'cpu_times', 'status', 'ppid'], ad_value='')) except (psutil.NoSuchProcess, psutil.AccessDenied): # Try/catch for issue #432 (process no longer exist) # Try/catch for issue #1120 (only see on Macos) return None else: procstat['status'] = str(procstat['status'])[:1].upper() try: procstat.update(proc.as_dict( attrs=['username', 'cpu_percent', 'memory_percent'], ad_value='')) except (psutil.NoSuchProcess, psutil.AccessDenied): # Try/catch for issue #432 (process no longer exist) return None if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '': # Do not display process if we cannot get the basic # cpu_percent or memory_percent stats return None # Compute the maximum value for cpu_percent and memory_percent for k in self._max_values_list: if procstat[k] > self.get_max_values(k): self.set_max_values(k, procstat[k]) # Process command line (cached with internal cache) if procstat['pid'] not in self.cmdline_cache: # Patch for issue #391 try: self.cmdline_cache[procstat['pid']] = proc.cmdline() except (AttributeError, EnvironmentError, UnicodeDecodeError, psutil.AccessDenied, psutil.NoSuchProcess): self.cmdline_cache[procstat['pid']] = "" procstat['cmdline'] = self.cmdline_cache[procstat['pid']] # Process IO # procstat['io_counters'] is a list: # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag] # If io_tag = 0 > Access denied (display "?") # If io_tag = 1 > No access denied (display the IO rate) # Availability: all platforms except macOS and Illumos/Solaris try: # Get the process IO counters proc_io = proc.io_counters() io_new = [proc_io.read_bytes, proc_io.write_bytes] except (psutil.AccessDenied, psutil.NoSuchProcess, NotImplementedError): # Access denied to process IO (no root account) # NoSuchProcess (process die between first and second grab) # Put 0 in all values (for sort) and io_tag = 0 (for display) procstat['io_counters'] = [0, 0] + [0, 0] io_tag = 0 except AttributeError: return procstat else: # For IO rate computation # Append saved IO r/w bytes try: procstat['io_counters'] = io_new + self.io_old[procstat['pid']] except KeyError: procstat['io_counters'] = io_new + [0, 0] # then save the IO r/w bytes self.io_old[procstat['pid']] = io_new io_tag = 1 # Append the IO tag (for display) procstat['io_counters'] += [io_tag] return procstat def __get_standard_stats(self, proc, procstat): """ Get standard_stats: only for displayed processes. Stats grabbed inside this method: * nice and memory_info """ procstat['standard_stats'] = True # Process nice and memory_info (issue #926) try: procstat.update( proc.as_dict(attrs=['nice', 'memory_info'])) except psutil.NoSuchProcess: pass return procstat def __get_extended_stats(self, proc, procstat): """ Get extended stats, only for top processes (see issue #403). 
    def __get_extended_stats(self, proc, procstat):
        """
        Get extended stats, only for top processes (see issue #403).

        - cpu_affinity (Linux, Windows, FreeBSD)
        - ionice (Linux and Windows > Vista)
        - memory_full_info (Linux)
        - num_ctx_switches (not available on Illumos/Solaris)
        - num_fds (Unix-like)
        - num_handles (Windows)
        - num_threads (not available on *BSD)
        - memory_maps (only swap, Linux)
          https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
        - connections (TCP and UDP)
        """
        procstat['extended_stats'] = True

        for stat in ['cpu_affinity', 'ionice', 'memory_full_info',
                     'num_ctx_switches', 'num_fds', 'num_handles',
                     'num_threads']:
            try:
                procstat.update(proc.as_dict(attrs=[stat]))
            except psutil.NoSuchProcess:
                pass
            # XXX: psutil>=4.3.1 raises ValueError while <4.3.1 raises AttributeError
            except (ValueError, AttributeError):
                procstat[stat] = None

        if LINUX:
            try:
                procstat['memory_swap'] = sum([v.swap for v in proc.memory_maps()])
            except psutil.NoSuchProcess:
                pass
            except (psutil.AccessDenied, TypeError, NotImplementedError):
                # NotImplementedError: /proc/${PID}/smaps file doesn't exist
                # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
                # is not enabled (see psutil #533/glances #413).
                # XXX: Remove TypeError once we drop psutil < 3.0.0.
                procstat['memory_swap'] = None

        try:
            procstat['tcp'] = len(proc.connections(kind="tcp"))
            procstat['udp'] = len(proc.connections(kind="udp"))
        except psutil.AccessDenied:
            procstat['tcp'] = None
            procstat['udp'] = None

        return procstat

    def __get_process_stats(self, proc,
                            mandatory_stats=True,
                            standard_stats=True,
                            extended_stats=False):
        """Get stats of a running process."""
        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat = self.__get_mandatory_stats(proc, procstat)
        if procstat is not None and standard_stats:
            procstat = self.__get_standard_stats(proc, procstat)
        if procstat is not None and extended_stats and not self.disable_extended_tag:
            procstat = self.__get_extended_stats(proc, procstat)

        return procstat

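    # --- Illustrative sketch (comments only, not executed) ---
    # The three grabbing levels exposed by __get_process_stats, as called from
    # inside the class (the double-underscore name is mangled outside of it).
    # psutil.Process() with no argument targets the current process.
    #
    #   p = psutil.Process()
    #   light = self.__get_process_stats(p, mandatory_stats=True,
    #                                    standard_stats=False)    # sort/filter only
    #   shown = self.__get_process_stats(p, standard_stats=True)  # displayed rows
    #   top = self.__get_process_stats(p, standard_stats=True,
    #                                  extended_stats=True)       # first row only,
    #                                                             # unless disable_extended_tag
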
    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.reset_processcount()

        # Do not process if the disable tag is set
        if self.disable_tag:
            return

        # Get the time since the last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Reset the max dict
        self.reset_max_values()

        # Update the maximum process ID (pid) number
        self.processcount['pid_max'] = self.pid_max

        # Build an internal dict with only mandatory stats (sort keys)
        processdict = {}
        excluded_processes = set()
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
                continue

            # If self.max_processes is None: only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(proc,
                                         mandatory_stats=True,
                                         standard_stats=self.max_processes is None)
            # Check if s is not None (issue #879)
            # Ignore the 'idle' process on Windows and *BSD
            # Ignore the 'kernel_task' process on macOS
            # waiting for upstream patch from psutil
            if (s is None or
                    BSD and s['name'] == 'idle' or
                    WINDOWS and s['name'] == 'System Idle Process' or
                    MACOS and s['name'] == 'kernel_task'):
                continue
            # Continue to the next process if it has to be filtered
            if self._filter.is_filtered(s):
                excluded_processes.add(proc)
                continue

            # OK, add the process to the list
            processdict[proc] = s
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads,
                                                           excluded_processes)
            for i, node in enumerate(self.process_tree):
                # Only retrieve stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break
                # Add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)
                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update
        else:
            # Process optimization
            # Only retrieve stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N (returns a list of tuples)
                # tuple = key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(iteritems(processdict),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {}: {}".format(self.sort_key, e))
                    logger.error('{}'.format(listitems(processdict)[0]))
                    # Fallback to all processes (issue #423)
                    processloop = iteritems(processdict)
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    first = True
            else:
                # Get all processes stats
                processloop = iteritems(processdict)
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats, but only for the TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update the process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all-processes list used by the AMPs
        self.allprocesslist = [p for p in itervalues(processdict)]

        # Clean the internal caches if the timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getalllist(self):
        """Get the allprocesslist."""
        return self.allprocesslist

    def getlist(self, sortedby=None):
        """Get the processlist."""
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    @property
    def sort_key(self):
        """Get the current sort key."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, key):
        """Set the current sort key."""
        self._sort_key = key
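
    # --- Illustrative usage sketch (comments only, not executed) ---
    # Minimal end-to-end use of GlancesProcesses from client code, assuming the
    # usual 'glances.processes' module path.
    #
    #   from glances.processes import GlancesProcesses
    #
    #   gp = GlancesProcesses(cache_timeout=60)
    #   gp.sort_key = 'cpu_percent'
    #   gp.max_processes = 10
    #   gp.update()
    #   print(gp.getcount()['total'])
    #   for p in gp.getlist():
    #       print(p['pid'], p['name'], p['cpu_percent'])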