def __get_percpu(self):
    """Update and/or return the per-CPU usage list using the psutil library.

    The expensive psutil call is made at most once per ``cached_time``
    seconds; in between, the cached list is returned unchanged.
    """
    if self.timer_percpu.finished():
        refreshed = []
        for index, times in enumerate(
                psutil.cpu_times_percent(interval=0.0, percpu=True)):
            entry = {
                'key': self.get_key(),
                'cpu_number': index,
                'total': round(100 - times.idle, 1),
                'user': times.user,
                'system': times.system,
                'idle': times.idle,
            }
            # Optional, platform-dependent fields exposed for API purposes only
            for field in ('nice', 'iowait', 'irq', 'softirq',
                          'steal', 'guest', 'guest_nice'):
                if hasattr(times, field):
                    entry[field] = getattr(times, field)
            refreshed.append(entry)
        self.percpu_percent = refreshed
        # Re-arm the cache timer
        self.timer_percpu = Timer(self.cached_time)
    return self.percpu_percent
def update(self, servers_list):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    servers_list: Dict of dict with servers stats
    """
    logger.debug("Servers list: {}".format(servers_list))
    # Draw the screen once before entering the key-polling loop
    self.flush(servers_list)

    countdown = Timer(self.__refresh_time)
    exitkey = False
    while not (exitkey or countdown.finished()):
        pressedkey = self.__catch_key(servers_list)
        # ESC, 'q' or Enter (10) leaves the loop
        exitkey = pressedkey in (ord("\x1b"), ord("q"), 10)
        if not exitkey and pressedkey > -1:
            # Some other key was pressed: redraw
            self.flush(servers_list)
        # Sleep 100 ms between two key polls
        self.wait()

    return self.active_server
def update(self, servers_list):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    servers_list: Dict of dict with servers stats
    """
    logger.debug('Servers list: {}'.format(servers_list))
    # Initial draw before polling the keyboard
    self.flush(servers_list)

    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not (countdown.finished() or exitkey):
        pressedkey = self.__catch_key(servers_list)
        # Exit/select keys: ESC, 'q', or Enter (10)
        exitkey = pressedkey in (ord('\x1b'), ord('q'), 10)
        if not exitkey and pressedkey > -1:
            # Redraw on any other key press
            self.flush(servers_list)
        # Wait 100 ms before the next poll
        self.wait()

    return self.active_server
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    stats: Dict of dict with servers stats
    (duration, cs_status and return_to_browser are accepted for interface
    compatibility and not used here.)
    """
    logger.debug('Servers list: {}'.format(stats))
    # Draw the screen before entering the polling loop
    self.flush(stats)

    countdown = Timer(self.__refresh_time)
    exitkey = False
    while not (exitkey or countdown.finished()):
        pressedkey = self.__catch_key(stats)
        # ESC, 'q' or Enter (10) exits / selects a server
        exitkey = pressedkey in (ord('\x1b'), ord('q'), 10)
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(stats)
        # Wait 100 ms between key polls
        self.wait()

    return self.active_server
def __init__(self, config=None, args=None):
    """Init the Bottle-based web server."""
    self.config = config
    self.args = args

    # Stats are set later, from within the Bottle routes
    self.stats = None

    # cached_time is the minimum time interval between stats updates
    # i.e. HTTP/Restful calls will not retrieve updated info until the time
    # since last update is passed (will retrieve old cached info instead)
    self.timer = Timer(0)

    # Load the configuration file
    self.load_config(config)

    # Bottle application, with CORS enabled (issue #479)
    self._app = Bottle()
    self._app.install(EnableCors())

    # Protect the server with HTTP basic auth when a password is set
    if args.password != '':
        self._app.install(auth_basic(self.check_auth))

    # Register the URL routes
    self._route()

    # Location of the static files
    self.STATIC_PATH = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'static/public')
def update(self):
    """Update the ports list.

    The actual scanning is delegated to a background ThreadScanner; this
    method only (re)starts it when the refresh timer has expired and then
    returns the current stats list.
    """
    if self.input_method == 'local':
        # Only refresh:
        # * if there is no other scanning thread
        # * every refresh seconds (defined in the configuration file)
        if self._thread is None:
            thread_is_running = False
        else:
            # Bug fix: use is_alive() — the camelCase isAlive() alias was
            # deprecated and removed in Python 3.9.
            thread_is_running = self._thread.is_alive()
        if self.timer_ports.finished() and not thread_is_running:
            # Run ports scanner
            self._thread = ThreadScanner(self.stats)
            self._thread.start()
            # Restart timer with the refresh period of the first entry
            # (fall back to 0 when the list is empty)
            if len(self.stats) > 0:
                self.timer_ports = Timer(self.stats[0]['refresh'])
            else:
                self.timer_ports = Timer(0)
    else:
        # Not available in SNMP mode
        pass
    return self.stats
def __get_cpu(self):
    """Update and/or return the overall CPU percent using the psutil library.

    The value is cached: psutil is queried at most once per cached_time
    seconds.
    """
    if self.timer_cpu.finished():
        self.cpu_percent = psutil.cpu_percent(interval=0.0)
        # Re-arm the cache timer
        self.timer_cpu = Timer(self.cached_time)
    return self.cpu_percent
def __init__(self, cached_time=1):
    """Init the CPU percent holder.

    cached_time is the minimum time interval (seconds) between two stats
    updates; within that window the previously cached values are served.
    """
    self.cpu_percent = 0
    self.percpu_percent = []
    # Timers start at 0 so the very first call triggers a refresh
    self.timer_cpu = Timer(0)
    self.timer_percpu = Timer(0)
    self.cached_time = cached_time
def __init__(self, cached_time=1, config=None):
    """Init the instance and grab an initial set of stats."""
    self.stats = GlancesStatsServer(config)
    # Initial update
    self.stats.update()
    # cached_time is the minimum time interval between stats updates
    # i.e. XML/RPC calls will not retrieve updated info until the time
    # since last update is passed (will retrieve old cached info instead)
    self.timer = Timer(0)
    self.cached_time = cached_time
def __init__(self, args=None):
    """Init GlancesActions class."""
    # Map stat_name -> criticity, used to avoid executing the same
    # command twice for the same alert
    self.status = {}

    # Grace timer so no action is triggered right after Glances starts
    # (issue#732): wait refresh time * 2 seconds, or 3 s as a fallback
    delay = args.time * 2 if hasattr(args, 'time') else 3
    self.start_timer = Timer(delay)
def __init__(self, cache_timeout=60):
    """Init the class to collect stats about processes."""
    # psutil does not cache every stat, so keep our own internal caches
    # See: https://code.google.com/p/psutil/issues/detail?id=462
    self.username_cache = {}
    self.cmdline_cache = {}

    # Both caches are flushed every 'cache_timeout' seconds
    self.cache_timeout = cache_timeout
    self.cache_timer = Timer(self.cache_timeout)

    # Per-pid IO history: pid -> [read_bytes_old, write_bytes_old]
    self.io_old = {}

    # Whether or not to enable the process tree
    self._enable_tree = False
    self.process_tree = None

    # Stats containers and sorting defaults
    self.auto_sort = True
    self._sort_key = 'cpu_percent'
    self.allprocesslist = []
    self.processlist = []
    self.reset_processcount()

    # Set to True to skip the processes stats collection entirely
    # (reduces the Glances CPU consumption)
    self.disable_tag = False

    # Extended stats for the top process are enabled by default
    self.disable_extended_tag = False

    # Maximum number of processes showed in the UI (None if no limit)
    self._max_processes = None

    # Process filter is a regular expression
    self._filter = GlancesFilter()

    # Whether or not to hide kernel threads
    self.no_kernel_threads = False

    # Stats whose maximum value is tracked, e.g.
    # { 'cpu_percent': 0.0, 'memory_percent': 0.0 }
    # Used in the UI to highlight the maximum value
    self._max_values_list = ('cpu_percent', 'memory_percent')
    self._max_values = {}
    self.reset_max_values()
def __init__(self, config=None, args=None):
    """Init the export IF.

    Reads the [graph] section of the configuration file (command line
    arguments take precedence), creates the output folder and checks it
    is writeable. Exits with status 2 on an unusable output folder.
    """
    super(Export, self).__init__(config=config, args=args)

    # Load the Graph configuration file section (if it exists)
    self.export_enable = self.load_conf(
        'graph',
        options=['path', 'generate_every', 'width', 'height', 'style'])

    # Manage options (command line arguments overwrite configuration file)
    self.path = args.export_graph_path or self.path
    self.generate_every = int(getattr(self, 'generate_every', 0))
    self.width = int(getattr(self, 'width', 800))
    self.height = int(getattr(self, 'height', 600))
    self.style = getattr(pygal.style,
                         getattr(self, 'style', 'DarkStyle'),
                         pygal.style.DarkStyle)

    # Create export folder (an already existing folder is fine)
    try:
        os.makedirs(self.path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            logger.critical(
                "Cannot create the Graph output folder {} ({})".format(
                    self.path, e))
            sys.exit(2)

    # Check if output folder is writeable
    # (clean-up: the exception object was bound but never used)
    try:
        tempfile.TemporaryFile(dir=self.path)
    except OSError:
        logger.critical("Graph output folder {} is not writeable".format(
            self.path))
        sys.exit(2)

    logger.info("Graphs will be created in the {} folder".format(
        self.path))
    logger.info(
        "Graphs will be created when 'g' key is pressed (in the CLI interface)"
    )
    if self.generate_every != 0:
        logger.info(
            "Graphs will be created automatically every {} seconds".format(
                self.generate_every))
        # Start the timer
        self._timer = Timer(self.generate_every)
    else:
        self._timer = None
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
    """Update the screen.

    Catch key every 100 ms.

    INPUT
    stats: Stats database to display
    duration: duration of the loop
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    return_to_browser:
        True: Do not exit, return to the browser list
        False: Exit and return to the shell

    OUTPUT
    True: Exit key has been pressed
    False: Others cases...
    """
    # Flush display
    self.flush(stats, cs_status=cs_status)

    # duration <= 0 means update + export took longer than refresh_time:
    # still display the interface, but keep the loop short
    if duration <= 0:
        logger.debug('Update and export time higher than refresh_time.')
        duration = 0.1

    countdown = Timer(duration)
    exitkey = False
    while not (exitkey or countdown.finished()):
        pressedkey = self.__catch_key(return_to_browser=return_to_browser)
        # ESC or 'q' exits
        exitkey = pressedkey in (ord('\x1b'), ord('q'))
        if not exitkey and pressedkey > -1:
            # Redraw after any other key press
            self.flush(stats, cs_status=cs_status)
        # Wait 100 ms before polling the keyboard again
        self.wait()

    return exitkey
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
    """Update the screen.

    INPUT
    stats: Stats database to display
    duration: duration of the loop
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    return_to_browser:
        True: Do not exit, return to the browser list
        False: Exit and return to the shell

    OUTPUT
    True: Exit key has been pressed
    False: Others cases...
    """
    # Flush display
    self.flush(stats, cs_status=cs_status)

    # duration <= 0 means update + export took longer than refresh_time:
    # warn and keep a minimal loop duration
    if duration <= 0:
        logger.warning('Update and export time higher than refresh_time.')
        duration = 0.1

    exitkey = False
    countdown = Timer(duration)
    # Default getch timeout (in ms) covers the whole loop duration
    self.term_window.timeout(int(duration * 1000))
    while not (exitkey or countdown.finished()):
        pressedkey = self.__catch_key(return_to_browser=return_to_browser)
        # ESC or 'q' exits
        exitkey = pressedkey in (ord('\x1b'), ord('q'))
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(stats, cs_status=cs_status)
            # Shrink the getch timeout to the remaining countdown
            self.term_window.timeout(int(countdown.get() * 1000))
    return exitkey
def get(self):
    """Get the first public IP address returned by one of the online services.

    Returns a comma-separated, deduplicated string of addresses, or None
    if no service answered within self.timeout seconds.
    """
    q = queue.Queue()

    # Query every service concurrently; each worker puts its answer in q
    for u, j, k in urls:
        t = threading.Thread(target=self._get_ip_public, args=(q, u, j, k))
        t.daemon = True
        t.start()

    # Wait for the first answer, up to self.timeout seconds
    timer = Timer(self.timeout)
    ip = None
    while not timer.finished() and ip is None:
        if q.qsize() > 0:
            ip = q.get()

    # Bug fix: ip stays None when every service timed out; calling
    # ip.split() would raise AttributeError. Return None instead.
    if ip is None:
        return None

    # Some services answer with a comma-separated list: deduplicate it
    return ', '.join(set([x.strip() for x in ip.split(',')]))
def __get_percpu(self):
    """Update and/or return the per CPU list using the psutil library.

    psutil is queried at most once per cached_time seconds; otherwise
    the cached list is returned.
    """
    if self.timer_percpu.finished():
        per_cpu = []
        cputimes_list = psutil.cpu_times_percent(interval=0.0, percpu=True)
        for number, cputimes in enumerate(cputimes_list):
            stat = {'key': self.get_key(),
                    'cpu_number': number,
                    'total': round(100 - cputimes.idle, 1),
                    'user': cputimes.user,
                    'system': cputimes.system,
                    'idle': cputimes.idle}
            # The following stats are for API purposes only
            # (availability is platform dependent)
            for optional in ('nice', 'iowait', 'irq', 'softirq',
                             'steal', 'guest', 'guest_nice'):
                if hasattr(cputimes, optional):
                    stat[optional] = getattr(cputimes, optional)
            per_cpu.append(stat)
        self.percpu_percent = per_cpu
        # Reset timer for cache
        self.timer_percpu = Timer(self.cached_time)
    return self.percpu_percent
def get(self):
    """Get the first public IP address returned by one of the online services.

    Returns None when no service answered within self.timeout seconds.
    """
    answers = queue.Queue()

    # One daemon thread per service; the first answer wins
    for u, j, k in urls:
        worker = threading.Thread(target=self._get_ip_public,
                                  args=(answers, u, j, k))
        worker.daemon = True
        worker.start()

    timer = Timer(self.timeout)
    ip = None
    while ip is None and not timer.finished():
        if answers.qsize() > 0:
            ip = answers.get()

    return ip
def __init__(self, config=None, args=None):
    """Init the Bottle web server (static files + templates)."""
    self.config = config
    self.args = args

    # Stats will be updated from within the Bottle routes
    self.stats = None

    # cached_time is the minimum time interval between stats updates
    # i.e. HTTP/Restful calls will not retrieve updated info until the time
    # since last update is passed (will retrieve old cached info instead)
    self.timer = Timer(0)

    # Load the configuration file
    self.load_config(config)

    # Bottle application with CORS enabled (issue #479)
    self._app = Bottle()
    self._app.install(EnableCors())

    # Optional HTTP basic auth when a password is configured
    if args.password != '':
        self._app.install(auth_basic(self.check_auth))

    # Register the URL routes
    self._route()

    # Path where the statics files are stored
    here = os.path.dirname(os.path.realpath(__file__))
    self.STATIC_PATH = os.path.join(here, 'static/public')

    # Paths for templates
    TEMPLATE_PATH.insert(0, os.path.join(here, 'static/templates'))
def __init__(self, args=None, config=None):
    """Init the plugin."""
    super(Plugin, self).__init__(args=args)
    self.args = args
    self.config = config

    # This stat is shown in the curses interface
    self.display_curse = True

    # Stats come from the ports list of the configuration file
    self.stats = GlancesPortsList(config=config, args=args).get_ports_list()

    # Global Timer, initialised to 0 (presumably so the first update
    # triggers a scan immediately)
    self.timer_ports = Timer(0)
    # Single background thread running all the scans
    self._thread = None
def __init__(self, name=None, args=None):
    """Init the AMP class."""
    logger.debug("AMP - Init {} version {}".format(self.NAME, self.VERSION))

    # AMP name (= module name without the 'glances_' prefix)
    if name is None:
        self.amp_name = self.__class__.__module__[len('glances_'):]
    else:
        self.amp_name = name

    # Command line arguments
    self.args = args

    # Per-AMP configuration values
    self.configs = {}

    # A timer is needed to only update every refresh seconds
    # Init to 0 in order to update the AMP on startup
    self.timer = Timer(0)
def update(self, stats, cs_status=None, return_to_browser=False):
    """Update the screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    INPUT
    stats: Stats database to display
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    return_to_browser:
        True: Do not exit, return to the browser list
        False: Exit and return to the shell

    OUTPUT
    True: Exit key has been pressed
    False: Others cases...
    """
    # Flush display
    self.flush(stats, cs_status=cs_status)

    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not (exitkey or countdown.finished()):
        pressedkey = self.__catch_key(return_to_browser=return_to_browser)
        # ESC or 'q' exits
        exitkey = pressedkey in (ord('\x1b'), ord('q'))
        if not exitkey and pressedkey > -1:
            # Redraw after any other key press
            self.flush(stats, cs_status=cs_status)
        # Sleep 100 ms between two key polls
        curses.napms(100)

    return exitkey
def update(self, stats, cs_status=None, return_to_browser=False):
    """Update the screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    INPUT
    stats: Stats database to display
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    return_to_browser:
        True: Do not exit, return to the browser list
        False: Exit and return to the shell

    OUTPUT
    True: Exit key has been pressed
    False: Others cases...
    """
    # Flush display
    self.flush(stats, cs_status=cs_status)

    countdown = Timer(self.__refresh_time)
    exitkey = False
    while not (countdown.finished() or exitkey):
        pressedkey = self.__catch_key(return_to_browser=return_to_browser)
        # ESC or 'q' exits
        exitkey = pressedkey in (ord('\x1b'), ord('q'))
        if not exitkey and pressedkey > -1:
            # Redraw on any other key press
            self.flush(stats, cs_status=cs_status)
        # Wait 100 ms before polling again
        self.wait()

    return exitkey
def __init__(self, config=None, args=None):
    """Init the export IF."""
    super(Export, self).__init__(config=config, args=args)

    # Load the [graph] section of the configuration file (if it exists)
    self.export_enable = self.load_conf(
        'graph', options=['path', 'generate_every', 'width', 'height', 'style'])

    # Command line arguments overwrite the configuration file options
    self.path = args.export_graph_path or self.path
    self.generate_every = int(getattr(self, 'generate_every', 0))
    self.width = int(getattr(self, 'width', 800))
    self.height = int(getattr(self, 'height', 600))
    self.style = getattr(pygal.style,
                         getattr(self, 'style', 'DarkStyle'),
                         pygal.style.DarkStyle)

    # Create the export folder (already existing is fine)
    try:
        os.makedirs(self.path)
    except OSError as err:
        if err.errno != errno.EEXIST:
            logger.critical("Cannot create the Graph output folder {} ({})".format(self.path, err))
            sys.exit(2)

    # Make sure the output folder is writeable
    try:
        tempfile.TemporaryFile(dir=self.path)
    except OSError as err:
        logger.critical("Graph output folder {} is not writeable".format(self.path))
        sys.exit(2)

    logger.info("Graphs will be created in the {} folder".format(self.path))
    logger.info("Graphs will be created when 'g' key is pressed (in the CLI interface)")
    if self.generate_every != 0:
        logger.info("Graphs will be created automatically every {} seconds".format(self.generate_every))
        # Start the timer
        self._timer = Timer(self.generate_every)
    else:
        self._timer = None
def __set_folder_list(self, section):
    """Init the monitored folder list.

    The list is defined in the Glances configuration file.
    """
    for idx in range(1, self.__folder_list_max_size + 1):
        value = {}
        key = 'folder_' + str(idx) + '_'

        # Path is mandatory: skip the entry entirely if missing
        value['indice'] = str(idx)
        value['path'] = self.config.get_value(section, key + 'path')
        if value['path'] is None:
            continue
        value['path'] = nativestr(value['path'])

        # Optional conf keys
        # Refresh time
        value['refresh'] = int(
            self.config.get_value(section,
                                  key + 'refresh',
                                  default=self.__default_refresh))
        self.timer_folders.append(Timer(value['refresh']))

        # Thresholds and their optional associated actions
        for level in ['careful', 'warning', 'critical']:
            # Read threshold
            value[level] = self.config.get_value(section, key + level)
            if value[level] is not None:
                logger.debug("{} threshold for folder {} is {}".format(
                    level, value["path"], value[level]))
            # Read action
            action = self.config.get_value(section, key + level + '_action')
            if action is not None:
                value[level + '_action'] = action
                logger.debug("{} action for folder {} is {}".format(
                    level, value["path"], value[level + '_action']))

        # Add the item to the list
        self.__folder_list.append(value)
class GlancesProcesses(object):

    """Get processed stats using the psutil library."""

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes."""
        # Add internals caches because PSUtil do not cache all the stats
        # See: https://code.google.com/p/psutil/issues/detail?id=462
        self.username_cache = {}
        self.cmdline_cache = {}

        # The internals caches will be cleaned each 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Init the io dict
        # key = pid
        # value = [ read_bytes_old, write_bytes_old ]
        self.io_old = {}

        # Wether or not to enable process tree
        self._enable_tree = False
        self.process_tree = None

        # Init stats
        self.auto_sort = True
        self._sort_key = 'cpu_percent'
        self.allprocesslist = []
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
        # Default is to enable the processes stats
        self.disable_tag = False

        # Extended stats for top process is enable by default
        self.disable_extended_tag = False

        # Maximum number of processes showed in the UI (None if no limit)
        self._max_processes = None

        # Process filter is a regular expression
        self._process_filter = None
        self._process_filter_re = None

        # Whether or not to hide kernel threads
        self.no_kernel_threads = False

    def enable(self):
        """Enable process stats."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Enable extended process stats."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    @property
    def max_processes(self):
        """Get the maximum number of processes showed in the UI."""
        return self._max_processes

    @max_processes.setter
    def max_processes(self, value):
        """Set the maximum number of processes showed in the UI."""
        self._max_processes = value

    @property
    def process_filter(self):
        """Get the process filter."""
        return self._process_filter

    @process_filter.setter
    def process_filter(self, value):
        """Set the process filter (a regular expression, or None to disable)."""
        logger.info("Set process filter to {0}".format(value))
        self._process_filter = value
        if value is not None:
            try:
                self._process_filter_re = re.compile(value)
                logger.debug("Process filter regex compilation OK: {0}".format(self.process_filter))
            except Exception:
                logger.error("Cannot compile process filter regex: {0}".format(value))
                self._process_filter_re = None
        else:
            self._process_filter_re = None

    @property
    def process_filter_re(self):
        """Get the process regular expression compiled."""
        return self._process_filter_re

    def is_filtered(self, value):
        """Return True if the value should be filtered."""
        if self.process_filter is None:
            # No filter => Not filtered
            return False
        else:
            try:
                return self.process_filter_re.match(' '.join(value)) is None
            except AttributeError:
                # Filter processes crashs with a bad regular expression pattern (issue #665)
                return False

    def disable_kernel_threads(self):
        """Ignore kernel threads in process list."""
        self.no_kernel_threads = True

    def enable_tree(self):
        """Enable process tree."""
        self._enable_tree = True

    def is_tree_enabled(self):
        """Return True if process tree is enabled, False instead."""
        return self._enable_tree

    @property
    def sort_reverse(self):
        """Return True to sort processes in reverse 'key' order, False instead."""
        # Text columns sort ascending; numeric stats sort descending
        if self.sort_key == 'name' or self.sort_key == 'username':
            return False

        return True

    def __get_mandatory_stats(self, proc, procstat):
        """
        Get mandatory_stats: need for the sorting/filter step.

        => cpu_percent, memory_percent, io_counters, name, cmdline
        Returns None when the process vanished or basic stats are missing.
        """
        procstat['mandatory_stats'] = True

        # Process CPU, MEM percent and name
        try:
            procstat.update(proc.as_dict(
                attrs=['username', 'cpu_percent', 'memory_percent',
                       'name', 'cpu_times'], ad_value=''))
        except psutil.NoSuchProcess:
            # Try/catch for issue #432
            return None
        if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '':
            # Do not display process if we cannot get the basic
            # cpu_percent or memory_percent stats
            return None

        # Process command line (cached with internal cache)
        try:
            self.cmdline_cache[procstat['pid']]
        except KeyError:
            # Patch for issue #391
            try:
                self.cmdline_cache[procstat['pid']] = proc.cmdline()
            except (AttributeError, UnicodeDecodeError, psutil.AccessDenied, psutil.NoSuchProcess):
                self.cmdline_cache[procstat['pid']] = ""
        procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        # Note Disk IO stat not available on Mac OS
        if not OSX:
            try:
                # Get the process IO counters
                proc_io = proc.io_counters()
                io_new = [proc_io.read_bytes, proc_io.write_bytes]
            except (psutil.AccessDenied, psutil.NoSuchProcess, NotImplementedError):
                # Access denied to process IO (no root account)
                # NoSuchProcess (process die between first and second grab)
                # Put 0 in all values (for sort) and io_tag = 0 (for
                # display)
                procstat['io_counters'] = [0, 0] + [0, 0]
                io_tag = 0
            else:
                # For IO rate computation
                # Append saved IO r/w bytes
                try:
                    procstat['io_counters'] = io_new + \
                        self.io_old[procstat['pid']]
                except KeyError:
                    procstat['io_counters'] = io_new + [0, 0]
                # then save the IO r/w bytes
                self.io_old[procstat['pid']] = io_new
                io_tag = 1

            # Append the IO tag (for display)
            procstat['io_counters'] += [io_tag]

        return procstat

    def __get_standard_stats(self, proc, procstat):
        """
        Get standard_stats: for all the displayed processes.

        => username, status, memory_info, cpu_times
        """
        procstat['standard_stats'] = True

        # Process username (cached with internal cache)
        try:
            self.username_cache[procstat['pid']]
        except KeyError:
            try:
                self.username_cache[procstat['pid']] = proc.username()
            except psutil.NoSuchProcess:
                self.username_cache[procstat['pid']] = "?"
            except (KeyError, psutil.AccessDenied):
                # Fall back to the real uid when the name is unavailable
                try:
                    self.username_cache[procstat['pid']] = proc.uids().real
                except (KeyError, AttributeError, psutil.AccessDenied):
                    self.username_cache[procstat['pid']] = "?"
        procstat['username'] = self.username_cache[procstat['pid']]

        # Process status, nice, memory_info and cpu_times
        try:
            procstat.update(
                proc.as_dict(attrs=['status', 'nice', 'memory_info', 'cpu_times']))
        except psutil.NoSuchProcess:
            pass
        else:
            # Keep only the first letter of the status, uppercased
            procstat['status'] = str(procstat['status'])[:1].upper()

        return procstat

    def __get_extended_stats(self, proc, procstat):
        """
        Get extended_stats: only for top processes (see issue #403).

        => connections (UDP/TCP), memory_swap...
        """
        procstat['extended_stats'] = True

        # CPU affinity (Windows and Linux only)
        try:
            procstat.update(proc.as_dict(attrs=['cpu_affinity']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['cpu_affinity'] = None
        # Memory extended
        try:
            procstat.update(proc.as_dict(attrs=['memory_info_ex']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['memory_info_ex'] = None
        # Number of context switch
        try:
            procstat.update(proc.as_dict(attrs=['num_ctx_switches']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_ctx_switches'] = None
        # Number of file descriptors (Unix only)
        try:
            procstat.update(proc.as_dict(attrs=['num_fds']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_fds'] = None
        # Threads number
        try:
            procstat.update(proc.as_dict(attrs=['num_threads']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_threads'] = None

        # Number of handles (Windows only)
        if WINDOWS:
            try:
                procstat.update(proc.as_dict(attrs=['num_handles']))
            except psutil.NoSuchProcess:
                pass
        else:
            procstat['num_handles'] = None

        # SWAP memory (Only on Linux based OS)
        # http://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
        if LINUX:
            try:
                procstat['memory_swap'] = sum(
                    [v.swap for v in proc.memory_maps()])
            except psutil.NoSuchProcess:
                pass
            except psutil.AccessDenied:
                procstat['memory_swap'] = None
            except Exception:
                # Add a dirty except to handle the PsUtil issue #413
                procstat['memory_swap'] = None

        # Process network connections (TCP and UDP)
        try:
            procstat['tcp'] = len(proc.connections(kind="tcp"))
            procstat['udp'] = len(proc.connections(kind="udp"))
        except Exception:
            procstat['tcp'] = None
            procstat['udp'] = None

        # IO Nice
        # http://pythonhosted.org/psutil/#psutil.Process.ionice
        if LINUX or WINDOWS:
            try:
                procstat.update(proc.as_dict(attrs=['ionice']))
            except psutil.NoSuchProcess:
                pass
        else:
            procstat['ionice'] = None

        return procstat

    def __get_process_stats(self, proc,
                            mandatory_stats=True,
                            standard_stats=True,
                            extended_stats=False):
        """Get stats of running processes.

        Collects up to three tiers of stats (mandatory/standard/extended);
        each tier may return None when the process disappeared, which
        short-circuits the remaining tiers.
        """
        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat = self.__get_mandatory_stats(proc, procstat)
        if procstat is not None and standard_stats:
            procstat = self.__get_standard_stats(proc, procstat)
        if procstat is not None and extended_stats and not self.disable_extended_tag:
            procstat = self.__get_extended_stats(proc, procstat)

        return procstat

    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only mandatories stats (sort keys)
        processdict = {}
        excluded_processes = set()
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
                continue

            # If self.max_processes is None: Only retreive mandatory stats
            # Else: retreive mandatory and standard stats
            s = self.__get_process_stats(proc,
                                         mandatory_stats=True,
                                         standard_stats=self.max_processes is None)
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
                excluded_processes.add(proc)
                continue
            # Ok add the process to the list
            processdict[proc] = s
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            if (BSD and processdict[proc]['name'] == 'idle' or
                    WINDOWS and processdict[proc]['name'] == 'System Idle Process' or
                    OSX and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            # Tree mode: build the hierarchy then refresh stats of the
            # visible nodes only
            self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads,
                                                           excluded_processes)

            for i, node in enumerate(self.process_tree):
                # Only retreive stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break

                # add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)

                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update

        else:
            # Process optimization
            # Only retreive stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N (Return a list of tuple)
                # tuple=key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(iteritems(processdict),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {0}: {1}".format(self.sort_key, e))
                    logger.error('{0}'.format(listitems(processdict)[0]))
                    # Fallback to all process (issue #423)
                    processloop = iteritems(processdict)
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    first = True
            else:
                # Get all processes stats
                processloop = iteritems(processdict)
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats but only for TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all processes list used by the monitored list
        self.allprocesslist = itervalues(processdict)

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getalllist(self):
        """Get the allprocesslist."""
        return self.allprocesslist

    def getlist(self, sortedby=None):
        """Get the processlist."""
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    @property
    def sort_key(self):
        """Get the current sort key."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, key):
        """Set the current sort key."""
        self._sort_key = key
class GlancesInstance(object):
    """Expose Glances stats: every public method is an XML-RPC endpoint."""

    def __init__(self, cached_time=1, config=None):
        # Build the server-side stats object and grab a first snapshot
        self.stats = GlancesStatsServer(config)
        self.stats.update()
        # cached_time is the minimum delay between two stats refreshes:
        # XML-RPC calls made before it elapses receive the cached values.
        # An already-expired Timer(0) forces a refresh on the first call.
        self.timer = Timer(0)
        self.cached_time = cached_time

    def __update__(self):
        # Refresh at most once per cached_time window
        if not self.timer.finished():
            return
        self.stats.update()
        self.timer = Timer(self.cached_time)

    def init(self):
        # Expose the Glances version string to clients
        return __version__

    def getAll(self):
        # Refresh (if the cache window elapsed) then serialize every stat
        self.__update__()
        return json.dumps(self.stats.getAll())

    def getAllPlugins(self):
        # Serialize the list of loaded plugins
        return json.dumps(self.stats.getAllPlugins())

    def getAllLimits(self):
        # Serialize the limits of every plugin
        return json.dumps(self.stats.getAllLimitsAsDict())

    def getAllViews(self):
        # Serialize the views of every plugin
        return json.dumps(self.stats.getAllViewsAsDict())

    def __getattr__(self, item):
        """Fallback lookup that fabricates the per-plugin get*() methods.

        Any attribute named get<Something> is resolved dynamically against
        the stats object (after a cached refresh); anything else raises
        AttributeError as usual.
        """
        prefix = 'get'
        if not item.startswith(prefix):
            # Not an API getter: standard attribute-error behavior
            raise AttributeError(item)
        try:
            # Refresh the stats then delegate the lookup to the stats object
            self.__update__()
            return getattr(self.stats, item)
        except Exception:
            # The getter does not exist on the stats object
            raise AttributeError(item)
def __init__(self, cache_timeout=60):
    """Init the class to collect stats about processes."""
    # Internal caches, needed because psutil does not cache every stat.
    # See: https://code.google.com/p/psutil/issues/detail?id=462
    self.username_cache = {}
    self.cmdline_cache = {}
    # Both caches are flushed every 'cache_timeout' seconds
    self.cache_timeout = cache_timeout
    self.cache_timer = Timer(self.cache_timeout)
    # Previous IO counters, keyed by pid:
    # io_old[pid] == [read_bytes_old, write_bytes_old]
    self.io_old = {}
    # Sorting state: default key is 'auto', possibly overridden by the
    # configuration file (issue#1536) => see glances_processlist.py init
    self.auto_sort = None
    self._sort_key = None
    self.set_sort_key('auto', auto=True)
    # Stats containers
    self.processlist = []
    self.reset_processcount()
    # Tags to switch off the processes stats / the extended stats of the
    # top process (both reduce the Glances CPU consumption)
    self.disable_tag = False
    self.disable_extended_tag = False
    # Probe once whether psutil can read io_counters on this system
    try:
        probe = psutil.Process()
        probe.io_counters()
    except Exception as err:
        logger.warning(
            'PsUtil can not grab processes io_counters ({})'.format(err))
        self.disable_io_counters = True
    else:
        logger.debug('PsUtil can grab processes io_counters')
        self.disable_io_counters = False
    # Probe once whether psutil can read gids on this system
    try:
        probe = psutil.Process()
        probe.gids()
    except Exception as err:
        logger.warning('PsUtil can not grab processes gids ({})'.format(err))
        self.disable_gids = True
    else:
        logger.debug('PsUtil can grab processes gids')
        self.disable_gids = False
    # Cap on the number of processes shown in the UI (None == unlimited)
    self._max_processes = None
    # Regular-expression based process filter
    self._filter = GlancesFilter()
    # Hide kernel threads from the list when True
    self.no_kernel_threads = False
    # Per-key maximum values, e.g. {'cpu_percent': 0.0, 'memory_percent': 0.0},
    # used by the UI to highlight the current maximum
    self._max_values_list = ('cpu_percent', 'memory_percent')
    self._max_values = {}
    self.reset_max_values()
class Plugin(GlancesPlugin):
    """Glances ports scanner plugin.

    Periodically scans a configured list of host/port pairs in a
    background thread and reports their status in the curses UI.
    """

    def __init__(self, args=None, config=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args)
        self.args = args
        self.config = config

        # We want to display the stat in the curse interface
        self.display_curse = True

        # Init stats (the configured ports list)
        self.stats = GlancesPortsList(config=config, args=args).get_ports_list()

        # Init global Timer (Timer(0) is already expired => scan on first update)
        self.timer_ports = Timer(0)

        # Global Thread running all the scans
        self._thread = None

    def exit(self):
        """Overwrite the exit method to close threads."""
        if self._thread is not None:
            self._thread.stop()
        # Call the father class
        super(Plugin, self).exit()

    @GlancesPlugin._log_result_decorator
    def update(self):
        """Update the ports list.

        Returns the (possibly still-scanning) stats list; the actual probing
        is delegated to a ThreadScanner so the UI never blocks.
        """
        if self.input_method == 'local':
            # Only refresh:
            # * if there is no other scanning thread
            # * every refresh seconds (defined in the configuration file)
            if self._thread is None:
                thread_is_running = False
            else:
                # FIX: Thread.isAlive() was deprecated and removed in
                # Python 3.9; is_alive() exists since Python 2.6
                thread_is_running = self._thread.is_alive()
            if self.timer_ports.finished() and not thread_is_running:
                # Run ports scanner
                self._thread = ThreadScanner(self.stats)
                self._thread.start()
                # Restart timer using the first port's refresh period
                if self.stats:
                    self.timer_ports = Timer(self.stats[0]['refresh'])
                else:
                    self.timer_ports = Timer(0)
        else:
            # Not available in SNMP mode
            pass

        return self.stats

    def get_alert(self, port, header="", log=False):
        """Return the alert status relative to the port scan return value.

        port['status'] is None while scanning, 0 on timeout, True for an
        open port, or the round-trip time in seconds.
        """
        if port['status'] is None:
            return 'CAREFUL'
        elif port['status'] == 0:
            return 'CRITICAL'
        elif isinstance(port['status'], (float, int)) and \
                port['rtt_warning'] is not None and \
                port['status'] > port['rtt_warning']:
            return 'WARNING'
        return 'OK'

    def msg_curse(self, args=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist and display plugin enable...
        if not self.stats or args.disable_ports:
            return ret

        # Build the string message
        for p in self.stats:
            if p['status'] is None:
                status = 'Scanning'
            elif isinstance(p['status'], bool_type) and p['status'] is True:
                status = 'Open'
            elif p['status'] == 0:
                status = 'Timeout'
            else:
                # Convert second to ms
                status = '{0:.0f}ms'.format(p['status'] * 1000.0)

            msg = '{:14.14} '.format(p['description'])
            ret.append(self.curse_add_line(msg))
            msg = '{:>8}'.format(status)
            ret.append(self.curse_add_line(msg, self.get_alert(p)))
            ret.append(self.curse_new_line())

        # Delete the last empty line
        try:
            ret.pop()
        except IndexError:
            pass

        return ret

    def _port_scan_all(self, stats):
        """Scan all host/port of the given stats."""
        for p in stats:
            self._port_scan(p)
            # Had to wait between two scans
            # If not, result are not ok
            time.sleep(1)
class GlancesProcesses(object):
    """Get processed stats using the psutil library.

    Collects per-process stats in three tiers (mandatory / standard /
    extended) and maintains global process counters plus an optional
    process tree.
    """

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes."""
        # Add internal caches because psutil does not cache all the stats
        # See: https://code.google.com/p/psutil/issues/detail?id=462
        self.username_cache = {}
        self.cmdline_cache = {}

        # The internal caches will be cleaned every 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Init the io dict
        # key = pid
        # value = [ read_bytes_old, write_bytes_old ]
        self.io_old = {}

        # Whether or not to enable the process tree view
        self._enable_tree = False
        self.process_tree = None

        # Init stats
        self.auto_sort = True
        self._sort_key = 'cpu_percent'
        self.allprocesslist = []
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
        # Default is to enable the processes stats
        self.disable_tag = False

        # Extended stats for top process are enabled by default
        self.disable_extended_tag = False

        # Maximum number of processes shown in the UI (None if no limit)
        self._max_processes = None

        # Process filter is a regular expression
        self._process_filter = None
        self._process_filter_re = None

        # Whether or not to hide kernel threads
        self.no_kernel_threads = False

    def enable(self):
        """Enable process stats (and refresh immediately)."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Enable extended process stats (and refresh immediately)."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    @property
    def max_processes(self):
        """Get the maximum number of processes showed in the UI."""
        return self._max_processes

    @max_processes.setter
    def max_processes(self, value):
        """Set the maximum number of processes showed in the UI."""
        self._max_processes = value

    @property
    def process_filter(self):
        """Get the process filter (the raw regex string, or None)."""
        return self._process_filter

    @process_filter.setter
    def process_filter(self, value):
        """Set the process filter and (re)compile its regex.

        A bad pattern is logged and leaves the compiled regex as None.
        """
        logger.info("Set process filter to {0}".format(value))
        self._process_filter = value
        if value is not None:
            try:
                self._process_filter_re = re.compile(value)
                logger.debug("Process filter regex compilation OK: {0}".format(
                    self.process_filter))
            except Exception:
                logger.error(
                    "Cannot compile process filter regex: {0}".format(value))
                self._process_filter_re = None
        else:
            self._process_filter_re = None

    @property
    def process_filter_re(self):
        """Get the process regular expression compiled."""
        return self._process_filter_re

    def is_filtered(self, value):
        """Return True if the value should be filtered (hidden).

        value is joined with spaces before matching, so it is expected to
        be a sequence of strings (e.g. a cmdline list).
        """
        if self.process_filter is None:
            # No filter => Not filtered
            return False
        else:
            try:
                return self.process_filter_re.match(' '.join(value)) is None
            except AttributeError:
                # Filter processes crashes with a bad regular expression
                # pattern (issue #665): regex is None => show everything
                return False

    def disable_kernel_threads(self):
        """Ignore kernel threads in process list."""
        self.no_kernel_threads = True

    def enable_tree(self):
        """Enable process tree."""
        self._enable_tree = True

    def is_tree_enabled(self):
        """Return True if process tree is enabled, False instead."""
        return self._enable_tree

    @property
    def sort_reverse(self):
        """Return True to sort processes in reverse 'key' order, False instead.

        Textual keys (name, username) sort ascending; numeric keys
        (cpu_percent, ...) sort descending.
        """
        if self.sort_key == 'name' or self.sort_key == 'username':
            return False

        return True

    def __get_mandatory_stats(self, proc, procstat):
        """Get mandatory_stats: needed for the sorting/filter step.

        => cpu_percent, memory_percent, io_counters, name, cmdline

        Returns the updated procstat dict, or None if the process vanished
        or its basic percent stats are unavailable.
        """
        procstat['mandatory_stats'] = True

        # Process CPU, MEM percent and name
        try:
            procstat.update(proc.as_dict(
                attrs=['username', 'cpu_percent', 'memory_percent',
                       'name', 'cpu_times'],
                ad_value=''))
        except psutil.NoSuchProcess:
            # Try/catch for issue #432 (process died during as_dict)
            return None
        if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '':
            # Do not display process if we cannot get the basic
            # cpu_percent or memory_percent stats
            return None

        # Process command line (cached with internal cache)
        try:
            self.cmdline_cache[procstat['pid']]
        except KeyError:
            # Patch for issue #391
            try:
                self.cmdline_cache[procstat['pid']] = proc.cmdline()
            except (AttributeError, UnicodeDecodeError,
                    psutil.AccessDenied, psutil.NoSuchProcess):
                self.cmdline_cache[procstat['pid']] = ""
        procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        # Note Disk IO stat not available on Mac OS
        if not OSX:
            try:
                # Get the process IO counters
                proc_io = proc.io_counters()
                io_new = [proc_io.read_bytes, proc_io.write_bytes]
            except (psutil.AccessDenied, psutil.NoSuchProcess,
                    NotImplementedError):
                # Access denied to process IO (no root account)
                # NoSuchProcess (process died between first and second grab)
                # Put 0 in all values (for sort) and io_tag = 0 (for
                # display)
                procstat['io_counters'] = [0, 0] + [0, 0]
                io_tag = 0
            else:
                # For IO rate computation
                # Append saved IO r/w bytes
                try:
                    procstat['io_counters'] = io_new + \
                        self.io_old[procstat['pid']]
                except KeyError:
                    # First time this pid is seen: no previous counters
                    procstat['io_counters'] = io_new + [0, 0]
                # then save the IO r/w bytes
                self.io_old[procstat['pid']] = io_new
                io_tag = 1

            # Append the IO tag (for display)
            procstat['io_counters'] += [io_tag]

        return procstat

    def __get_standard_stats(self, proc, procstat):
        """Get standard_stats: for all the displayed processes.

        => username, status, memory_info, cpu_times

        Always returns the (possibly unchanged) procstat dict.
        """
        procstat['standard_stats'] = True

        # Process username (cached with internal cache)
        try:
            self.username_cache[procstat['pid']]
        except KeyError:
            try:
                self.username_cache[procstat['pid']] = proc.username()
            except psutil.NoSuchProcess:
                self.username_cache[procstat['pid']] = "?"
            except (KeyError, psutil.AccessDenied):
                # Fall back to the real uid when the name is unavailable
                try:
                    self.username_cache[procstat['pid']] = proc.uids().real
                except (KeyError, AttributeError, psutil.AccessDenied):
                    self.username_cache[procstat['pid']] = "?"
        procstat['username'] = self.username_cache[procstat['pid']]

        # Process status, nice, memory_info and cpu_times
        try:
            procstat.update(proc.as_dict(
                attrs=['status', 'nice', 'memory_info', 'cpu_times']))
        except psutil.NoSuchProcess:
            pass
        else:
            # Keep only the first letter of the status, uppercased
            # (e.g. 'running' -> 'R')
            procstat['status'] = str(procstat['status'])[:1].upper()

        return procstat

    def __get_extended_stats(self, proc, procstat):
        """Get extended_stats: only for top processes (see issue #403).

        => connections (UDP/TCP), memory_swap...

        Each stat is grabbed independently; AttributeError means the stat
        is not supported by this psutil version/platform and is set to None.
        """
        procstat['extended_stats'] = True

        # CPU affinity (Windows and Linux only)
        try:
            procstat.update(proc.as_dict(attrs=['cpu_affinity']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['cpu_affinity'] = None
        # Memory extended
        try:
            procstat.update(proc.as_dict(attrs=['memory_info_ex']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['memory_info_ex'] = None
        # Number of context switch
        try:
            procstat.update(proc.as_dict(attrs=['num_ctx_switches']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_ctx_switches'] = None
        # Number of file descriptors (Unix only)
        try:
            procstat.update(proc.as_dict(attrs=['num_fds']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_fds'] = None
        # Threads number
        try:
            procstat.update(proc.as_dict(attrs=['num_threads']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_threads'] = None

        # Number of handles (Windows only)
        if WINDOWS:
            try:
                procstat.update(proc.as_dict(attrs=['num_handles']))
            except psutil.NoSuchProcess:
                pass
        else:
            procstat['num_handles'] = None

        # SWAP memory (Only on Linux based OS)
        # http://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
        if LINUX:
            try:
                procstat['memory_swap'] = sum(
                    [v.swap for v in proc.memory_maps()])
            except psutil.NoSuchProcess:
                pass
            except psutil.AccessDenied:
                procstat['memory_swap'] = None
            except Exception:
                # Add a dirty except to handle the PsUtil issue #413
                procstat['memory_swap'] = None

        # Process network connections (TCP and UDP)
        try:
            procstat['tcp'] = len(proc.connections(kind="tcp"))
            procstat['udp'] = len(proc.connections(kind="udp"))
        except Exception:
            procstat['tcp'] = None
            procstat['udp'] = None

        # IO Nice
        # http://pythonhosted.org/psutil/#psutil.Process.ionice
        if LINUX or WINDOWS:
            try:
                procstat.update(proc.as_dict(attrs=['ionice']))
            except psutil.NoSuchProcess:
                pass
        else:
            procstat['ionice'] = None

        return procstat

    def __get_process_stats(self, proc,
                            mandatory_stats=True,
                            standard_stats=True,
                            extended_stats=False):
        """Get stats of running processes.

        Chains the three stat tiers; a None result from a tier (vanished
        process) short-circuits the rest and is returned as-is.
        """
        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat = self.__get_mandatory_stats(proc, procstat)
        if procstat is not None and standard_stats:
            procstat = self.__get_standard_stats(proc, procstat)
        if procstat is not None and extended_stats and not self.disable_extended_tag:
            procstat = self.__get_extended_stats(proc, procstat)

        return procstat

    def update(self):
        """Update the processes stats.

        Rebuilds processlist/processcount (and the process tree when
        enabled), applying the filter, the sort key and the max_processes
        cap along the way.
        """
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update (used for IO rate computation)
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only mandatory stats (sort keys)
        processdict = {}
        excluded_processes = set()
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
                continue

            # If self.max_processes is None: Only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(proc,
                                         mandatory_stats=True,
                                         standard_stats=self.max_processes is None)
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
                excluded_processes.add(proc)
                continue
            # Ok add the process to the list
            processdict[proc] = s
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            if (BSD and processdict[proc]['name'] == 'idle' or
                    WINDOWS and processdict[proc]['name'] == 'System Idle Process' or
                    OSX and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            # Tree mode: build the full tree, then enrich only the visible
            # nodes with standard stats
            self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads,
                                                           excluded_processes)
            for i, node in enumerate(self.process_tree):
                # Only retrieve stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break
                # add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)
                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update
        else:
            # Process optimization
            # Only retrieve stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N (Return a list of tuple)
                # tuple=key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(iteritems(processdict),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {0}: {1}".format(
                        self.sort_key, e))
                    logger.error('{0}'.format(listitems(processdict)[0]))
                    # Fallback to all process (issue #423)
                    processloop = iteritems(processdict)
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    # 'first' flags the TOP process, which also gets the
                    # extended stats
                    first = True
            else:
                # Get all processes stats
                processloop = iteritems(processdict)
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats but only for TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all processes list used by the monitored list
        self.allprocesslist = itervalues(processdict)

        # Clean internal caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getalllist(self):
        """Get the allprocesslist."""
        return self.allprocesslist

    def getlist(self, sortedby=None):
        """Get the processlist."""
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    @property
    def sort_key(self):
        """Get the current sort key."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, key):
        """Set the current sort key."""
        self._sort_key = key
class GlancesBottle(object):
    """This class manages the Bottle Web server (RESTful API + Web UI)."""

    # Version prefix used in every /api/<version>/... route
    API_VERSION = '3'

    def __init__(self, config=None, args=None):
        # Init config
        self.config = config

        # Init args
        self.args = args

        # Init stats
        # Will be updated within Bottle route
        self.stats = None

        # cached_time is the minimum time interval between stats updates
        # i.e. HTTP/RESTful calls will not retrieve updated info until the time
        # since last update is passed (will retrieve old cached info instead)
        self.timer = Timer(0)

        # Load configuration file
        self.load_config(config)

        # Set the bind URL
        self.bind_url = 'http://{}:{}/'.format(self.args.bind_address,
                                               self.args.port)

        # Init Bottle
        self._app = Bottle()
        # Enable CORS (issue #479)
        self._app.install(EnableCors())
        # Password (basic auth is only installed when a password is set)
        if args.password != '':
            self._app.install(auth_basic(self.check_auth))
        # Define routes
        self._route()

        # Path where the statics files are stored
        self.STATIC_PATH = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'static/public')

        # Paths for templates
        TEMPLATE_PATH.insert(
            0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'static/templates'))

    def load_config(self, config):
        """Load the outputs section of the configuration file."""
        # Limit the number of processes to display in the WebUI
        # NOTE(review): the value read below is only logged, never stored on
        # self/args — looks like the assignment is missing; verify upstream.
        if config is not None and config.has_section('outputs'):
            logger.debug('Read number of processes to display in the WebUI')
            n = config.get_value('outputs', 'max_processes_display', default=None)
            logger.debug('Number of processes to display in the WebUI: {}'.format(n))

    def __update__(self):
        # Never update more than 1 time per cached_time
        if self.timer.finished():
            self.stats.update()
            self.timer = Timer(self.args.cached_time)

    def app(self):
        # Expose the underlying Bottle application
        return self._app()

    def check_auth(self, username, password):
        """Check if a username/password combination is valid."""
        if username == self.args.username:
            from glances.password import GlancesPassword
            pwd = GlancesPassword()
            return pwd.check_password(self.args.password, pwd.sha256_hash(password))
        else:
            return False

    def _route(self):
        """Define route."""
        # REST API
        self._app.route('/api/%s/config' % self.API_VERSION, method="GET",
                        callback=self._api_config)
        self._app.route('/api/%s/config/<item>' % self.API_VERSION, method="GET",
                        callback=self._api_config_item)
        self._app.route('/api/%s/args' % self.API_VERSION, method="GET",
                        callback=self._api_args)
        self._app.route('/api/%s/args/<item>' % self.API_VERSION, method="GET",
                        callback=self._api_args_item)
        self._app.route('/api/%s/help' % self.API_VERSION, method="GET",
                        callback=self._api_help)
        self._app.route('/api/%s/pluginslist' % self.API_VERSION, method="GET",
                        callback=self._api_plugins)
        self._app.route('/api/%s/all' % self.API_VERSION, method="GET",
                        callback=self._api_all)
        self._app.route('/api/%s/all/limits' % self.API_VERSION, method="GET",
                        callback=self._api_all_limits)
        self._app.route('/api/%s/all/views' % self.API_VERSION, method="GET",
                        callback=self._api_all_views)
        self._app.route('/api/%s/<plugin>' % self.API_VERSION, method="GET",
                        callback=self._api)
        self._app.route('/api/%s/<plugin>/history' % self.API_VERSION, method="GET",
                        callback=self._api_history)
        self._app.route('/api/%s/<plugin>/history/<nb:int>' % self.API_VERSION, method="GET",
                        callback=self._api_history)
        self._app.route('/api/%s/<plugin>/limits' % self.API_VERSION, method="GET",
                        callback=self._api_limits)
        self._app.route('/api/%s/<plugin>/views' % self.API_VERSION, method="GET",
                        callback=self._api_views)
        self._app.route('/api/%s/<plugin>/<item>' % self.API_VERSION, method="GET",
                        callback=self._api_item)
        self._app.route('/api/%s/<plugin>/<item>/history' % self.API_VERSION, method="GET",
                        callback=self._api_item_history)
        self._app.route('/api/%s/<plugin>/<item>/history/<nb:int>' % self.API_VERSION, method="GET",
                        callback=self._api_item_history)
        self._app.route('/api/%s/<plugin>/<item>/<value>' % self.API_VERSION, method="GET",
                        callback=self._api_value)
        bindmsg = 'Glances RESTful API Server started on {}api/{}/'.format(
            self.bind_url, self.API_VERSION)
        logger.info(bindmsg)

        # WEB UI
        if not self.args.disable_webui:
            self._app.route('/', method="GET", callback=self._index)
            self._app.route('/<refresh_time:int>', method=["GET"], callback=self._index)
            self._app.route('/<filepath:path>', method="GET", callback=self._resource)
            bindmsg = 'Glances Web User Interface started on {}'.format(self.bind_url)
            logger.info(bindmsg)
        else:
            logger.info('The WebUI is disable (--disable-webui)')

        # Also echo the last bind message on the console
        print(bindmsg)

    def start(self, stats):
        """Start the bottle."""
        # Init stats
        self.stats = stats

        # Init plugin list
        self.plugins_list = self.stats.getPluginsList()

        # Bind the Bottle TCP address/port
        if self.args.open_web_browser:
            # Implementation of the issue #946
            # Try to open the Glances Web UI in the default Web browser if:
            # 1) --open-web-browser option is used
            # 2) Glances standalone mode is running on Windows OS
            webbrowser.open(self.bind_url, new=2, autoraise=1)

        self._app.run(host=self.args.bind_address, port=self.args.port,
                      quiet=not self.args.debug)

    def end(self):
        """End the bottle."""
        # Nothing to clean up: Bottle's run loop returns on shutdown
        pass

    def _index(self, refresh_time=None):
        """Bottle callback for index.html (/) file."""
        if refresh_time is None or refresh_time < 1:
            refresh_time = self.args.time

        # Update the stat
        self.__update__()

        # Display
        return template("index.html", refresh_time=refresh_time)

    def _resource(self, filepath):
        """Bottle callback for resources files."""
        # Return the static file
        return static_file(filepath, root=self.STATIC_PATH)

    @compress
    def _api_help(self):
        """Glances API RESTful implementation.

        Return the help data or 404 error.
        """
        response.content_type = 'application/json; charset=utf-8'

        # Update the stat
        view_data = self.stats.get_plugin("help").get_view_data()
        try:
            plist = json.dumps(view_data, sort_keys=True)
        except Exception as e:
            abort(404, "Cannot get help view data (%s)" % str(e))
        return plist

    @compress
    def _api_plugins(self):
        """Glances API RESTFul implementation.

        @api {get} /api/%s/pluginslist Get plugins list
        @apiVersion 2.0
        @apiName pluginslist
        @apiGroup plugin

        @apiSuccess {String[]} Plugins list.

        @apiSuccessExample Success-Response:
            HTTP/1.1 200 OK
            [
               "load",
               "help",
               "ip",
               "memswap",
               "processlist",
               ...
            ]

        @apiError Cannot get plugin list.

        @apiErrorExample Error-Response:
            HTTP/1.1 404 Not Found
        """
        response.content_type = 'application/json; charset=utf-8'

        # Update the stat
        self.__update__()

        try:
            plist = json.dumps(self.plugins_list)
        except Exception as e:
            abort(404, "Cannot get plugin list (%s)" % str(e))
        return plist

    @compress
    def _api_all(self):
        """Glances API RESTful implementation.

        Return the JSON representation of all the plugins
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        if self.args.debug:
            # Debug mode: serve a canned JSON file instead of live stats
            fname = os.path.join(tempfile.gettempdir(), 'glances-debug.json')
            try:
                with open(fname) as f:
                    return f.read()
            except IOError:
                logger.debug("Debug file (%s) not found" % fname)

        # Update the stat
        self.__update__()

        try:
            # Get the JSON value of the stat ID
            statval = json.dumps(self.stats.getAllAsDict())
        except Exception as e:
            abort(404, "Cannot get stats (%s)" % str(e))
        return statval

    @compress
    def _api_all_limits(self):
        """Glances API RESTful implementation.

        Return the JSON representation of all the plugins limits
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        try:
            # Get the JSON value of the stat limits
            limits = json.dumps(self.stats.getAllLimitsAsDict())
        except Exception as e:
            abort(404, "Cannot get limits (%s)" % (str(e)))
        return limits

    @compress
    def _api_all_views(self):
        """Glances API RESTful implementation.

        Return the JSON representation of all the plugins views
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        try:
            # Get the JSON value of the stat view
            limits = json.dumps(self.stats.getAllViewsAsDict())
        except Exception as e:
            abort(404, "Cannot get views (%s)" % (str(e)))
        return limits

    @compress
    def _api(self, plugin):
        """Glances API RESTful implementation.

        Return the JSON representation of a given plugin
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        if plugin not in self.plugins_list:
            abort(400, "Unknown plugin %s (available plugins: %s)" % (plugin, self.plugins_list))

        # Update the stat
        self.__update__()

        try:
            # Get the JSON value of the stat ID
            statval = self.stats.get_plugin(plugin).get_stats()
        except Exception as e:
            abort(404, "Cannot get plugin %s (%s)" % (plugin, str(e)))
        return statval

    @compress
    def _api_history(self, plugin, nb=0):
        """Glances API RESTful implementation.

        Return the JSON representation of a given plugin history
        Limit to the last nb items (all if nb=0)
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        if plugin not in self.plugins_list:
            abort(400, "Unknown plugin %s (available plugins: %s)" % (plugin, self.plugins_list))

        # Update the stat
        self.__update__()

        try:
            # Get the JSON value of the stat ID
            statval = self.stats.get_plugin(plugin).get_stats_history(nb=int(nb))
        except Exception as e:
            abort(404, "Cannot get plugin history %s (%s)" % (plugin, str(e)))
        return statval

    @compress
    def _api_limits(self, plugin):
        """Glances API RESTful implementation.

        Return the JSON limits of a given plugin
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        if plugin not in self.plugins_list:
            abort(400, "Unknown plugin %s (available plugins: %s)" % (plugin, self.plugins_list))

        # Update the stat
        # self.__update__()

        try:
            # Get the JSON value of the stat limits
            ret = self.stats.get_plugin(plugin).limits
        except Exception as e:
            abort(404, "Cannot get limits for plugin %s (%s)" % (plugin, str(e)))
        return ret

    @compress
    def _api_views(self, plugin):
        """Glances API RESTful implementation.

        Return the JSON views of a given plugin
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        if plugin not in self.plugins_list:
            abort(400, "Unknown plugin %s (available plugins: %s)" % (plugin, self.plugins_list))

        # Update the stat
        # self.__update__()

        try:
            # Get the JSON value of the stat views
            ret = self.stats.get_plugin(plugin).get_views()
        except Exception as e:
            abort(404, "Cannot get views for plugin %s (%s)" % (plugin, str(e)))
        return ret

    @compress
    def _api_itemvalue(self, plugin, item, value=None, history=False, nb=0):
        """Father method for _api_item and _api_value."""
        response.content_type = 'application/json; charset=utf-8'

        if plugin not in self.plugins_list:
            abort(400, "Unknown plugin %s (available plugins: %s)" % (plugin, self.plugins_list))

        # Update the stat
        self.__update__()

        if value is None:
            if history:
                ret = self.stats.get_plugin(plugin).get_stats_history(item, nb=int(nb))
            else:
                ret = self.stats.get_plugin(plugin).get_stats_item(item)

            if ret is None:
                abort(404, "Cannot get item %s%s in plugin %s" % (item, 'history ' if history else '', plugin))
        else:
            if history:
                # Not available
                ret = None
            else:
                ret = self.stats.get_plugin(plugin).get_stats_value(item, value)

            if ret is None:
                abort(404, "Cannot get item %s(%s=%s) in plugin %s" % ('history ' if history else '', item, value, plugin))

        return ret

    @compress
    def _api_item(self, plugin, item):
        """Glances API RESTful implementation.

        Return the JSON representation of the couple plugin/item
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        return self._api_itemvalue(plugin, item)

    @compress
    def _api_item_history(self, plugin, item, nb=0):
        """Glances API RESTful implementation.

        Return the JSON representation of the couple plugin/history of item
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        return self._api_itemvalue(plugin, item, history=True, nb=int(nb))

    @compress
    def _api_value(self, plugin, item, value):
        """Glances API RESTful implementation.

        Return the process stats (dict) for the given item=value
        HTTP/200 if OK
        HTTP/400 if plugin is not found
        HTTP/404 if others error
        """
        return self._api_itemvalue(plugin, item, value)

    @compress
    def _api_config(self):
        """Glances API RESTful implementation.

        Return the JSON representation of the Glances configuration file
        HTTP/200 if OK
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        try:
            # Get the JSON value of the config' dict
            args_json = json.dumps(self.config.as_dict())
        except Exception as e:
            abort(404, "Cannot get config (%s)" % str(e))
        return args_json

    @compress
    def _api_config_item(self, item):
        """Glances API RESTful implementation.

        Return the JSON representation of the Glances configuration item
        HTTP/200 if OK
        HTTP/400 if item is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        config_dict = self.config.as_dict()
        if item not in config_dict:
            abort(400, "Unknown configuration item %s" % item)

        try:
            # Get the JSON value of the config' dict
            args_json = json.dumps(config_dict[item])
        except Exception as e:
            abort(404, "Cannot get config item (%s)" % str(e))
        return args_json

    @compress
    def _api_args(self):
        """Glances API RESTful implementation.

        Return the JSON representation of the Glances command line arguments
        HTTP/200 if OK
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        try:
            # Get the JSON value of the args' dict
            # Use vars to convert namespace to dict
            # Source: https://docs.python.org/%s/library/functions.html#vars
            args_json = json.dumps(vars(self.args))
        except Exception as e:
            abort(404, "Cannot get args (%s)" % str(e))
        return args_json

    @compress
    def _api_args_item(self, item):
        """Glances API RESTful implementation.

        Return the JSON representation of the Glances command line arguments item
        HTTP/200 if OK
        HTTP/400 if item is not found
        HTTP/404 if others error
        """
        response.content_type = 'application/json; charset=utf-8'

        if item not in self.args:
            abort(400, "Unknown argument item %s" % item)

        try:
            # Get the JSON value of the args' dict
            # Use vars to convert namespace to dict
            # Source: https://docs.python.org/%s/library/functions.html#vars
            args_json = json.dumps(vars(self.args)[item])
        except Exception as e:
            abort(404, "Cannot get args item (%s)" % str(e))
        return args_json
class Export(GlancesExport):

    """This class manages the Graph export module."""

    def __init__(self, config=None, args=None):
        """Init the export IF."""
        super(Export, self).__init__(config=config, args=args)

        # Load the Graph configuration file section (if it exists)
        self.export_enable = self.load_conf(
            'graph',
            options=['path', 'generate_every', 'width', 'height', 'style'])

        # Manage options (command line arguments overwrite configuration file)
        self.path = args.export_graph_path or self.path
        # Fall back to defaults when the option is absent from the config
        self.generate_every = int(getattr(self, 'generate_every', 0))
        self.width = int(getattr(self, 'width', 800))
        self.height = int(getattr(self, 'height', 600))
        # Resolve the pygal style object by name; default to DarkStyle
        self.style = getattr(pygal.style,
                             getattr(self, 'style', 'DarkStyle'),
                             pygal.style.DarkStyle)

        # Create export folder
        try:
            os.makedirs(self.path)
        except OSError as e:
            # An already-existing folder (EEXIST) is fine;
            # any other creation error is fatal
            if e.errno != errno.EEXIST:
                logger.critical(
                    "Cannot create the Graph output folder {} ({})".format(
                        self.path, e))
                sys.exit(2)

        # Check if output folder is writeable
        # (the temporary file is deleted automatically when closed)
        try:
            tempfile.TemporaryFile(dir=self.path)
        except OSError as e:
            logger.critical("Graph output folder {} is not writeable".format(
                self.path))
            sys.exit(2)

        logger.info("Graphs will be created in the {} folder".format(
            self.path))
        logger.info(
            "Graphs will be created when 'g' key is pressed (in the CLI interface)"
        )
        if self.generate_every != 0:
            logger.info(
                "Graphs will be created automatically every {} seconds".format(
                    self.generate_every))
            # Start the timer
            self._timer = Timer(self.generate_every)
        else:
            self._timer = None

    def exit(self):
        """Close the files."""
        logger.debug("Finalise export interface %s" % self.export_name)

    def update(self, stats):
        """Generate Graph file in the output folder."""
        # Automatic mode: raise the generate flag when the timer expires
        if self.generate_every != 0 and self._timer.finished():
            self.args.generate_graph = True
            self._timer.reset()

        if not self.args.generate_graph:
            return

        plugins = stats.getPluginsList()
        for plugin_name in plugins:
            plugin = stats._plugins[plugin_name]
            if plugin_name in self.plugins_to_export():
                self.export(plugin_name, plugin.get_export_history())

        logger.info("Graphs created in the folder {}".format(self.path))
        # One-shot flag: cleared once the graphs have been produced
        self.args.generate_graph = False

    def export(self, title, data):
        """Generate graph from the data.

        Example for the mem plugin:
        {'percent': [
            (datetime.datetime(2018, 3, 24, 16, 27, 47, 282070), 51.8),
            (datetime.datetime(2018, 3, 24, 16, 27, 47, 540999), 51.9),
            (datetime.datetime(2018, 3, 24, 16, 27, 50, 653390), 52.0),
            (datetime.datetime(2018, 3, 24, 16, 27, 53, 749702), 52.0),
            (datetime.datetime(2018, 3, 24, 16, 27, 56, 825660), 52.0),
            ...
            ]
        }

        Return:
        * True if the graph have been generated
        * False if the graph have not been generated
        """
        if data == {}:
            return False

        chart = DateTimeLine(
            title=title.capitalize(),
            width=self.width,
            height=self.height,
            style=self.style,
            show_dots=False,
            legend_at_bottom=True,
            x_label_rotation=20,
            x_value_formatter=lambda dt: dt.strftime('%Y/%m/%d %H:%M:%S'))
        # Downsample each time series to (at most) one point per pixel of width
        for k, v in iteritems(time_serie_subsample(data, self.width)):
            chart.add(k, v)
        chart.render_to_file(os.path.join(self.path, title + '.svg'))
        return True
class Export(GlancesExport):

    """This class manages the Graph export module."""

    def __init__(self, config=None, args=None):
        """Init the export IF."""
        super(Export, self).__init__(config=config, args=args)

        # Load the Graph configuration file section (if it exists)
        self.export_enable = self.load_conf('graph',
                                            options=['path',
                                                     'generate_every',
                                                     'width',
                                                     'height',
                                                     'style'])

        # Manage options (command line arguments overwrite configuration file)
        self.path = args.export_graph_path or self.path
        # Fall back to defaults when the option is absent from the config
        self.generate_every = int(getattr(self, 'generate_every', 0))
        self.width = int(getattr(self, 'width', 800))
        self.height = int(getattr(self, 'height', 600))
        # Resolve the pygal style object by name; default to DarkStyle
        self.style = getattr(pygal.style,
                             getattr(self, 'style', 'DarkStyle'),
                             pygal.style.DarkStyle)

        # Create export folder
        try:
            os.makedirs(self.path)
        except OSError as e:
            # An already-existing folder (EEXIST) is fine;
            # any other creation error is fatal
            if e.errno != errno.EEXIST:
                logger.critical("Cannot create the Graph output folder {} ({})".format(self.path, e))
                sys.exit(2)

        # Check if output folder is writeable
        # (the temporary file is deleted automatically when closed)
        try:
            tempfile.TemporaryFile(dir=self.path)
        except OSError as e:
            logger.critical("Graph output folder {} is not writeable".format(self.path))
            sys.exit(2)

        logger.info("Graphs will be created in the {} folder".format(self.path))
        logger.info("Graphs will be created when 'g' key is pressed (in the CLI interface)")
        if self.generate_every != 0:
            logger.info("Graphs will be created automatically every {} seconds".format(self.generate_every))
            # Start the timer
            self._timer = Timer(self.generate_every)
        else:
            self._timer = None

    def exit(self):
        """Close the files."""
        logger.debug("Finalise export interface %s" % self.export_name)

    def update(self, stats):
        """Generate Graph file in the output folder."""
        # Automatic mode: raise the generate flag when the timer expires
        if self.generate_every != 0 and self._timer.finished():
            self.args.generate_graph = True
            self._timer.reset()

        if not self.args.generate_graph:
            return

        plugins = stats.getPluginsList()
        for plugin_name in plugins:
            plugin = stats._plugins[plugin_name]
            if plugin_name in self.plugins_to_export():
                self.export(plugin_name, plugin.get_export_history())

        logger.info("Graphs created in the folder {}".format(self.path))
        # One-shot flag: cleared once the graphs have been produced
        self.args.generate_graph = False

    def export(self, title, data):
        """Generate graph from the data.

        Example for the mem plugin:
        {'percent': [
            (datetime.datetime(2018, 3, 24, 16, 27, 47, 282070), 51.8),
            (datetime.datetime(2018, 3, 24, 16, 27, 47, 540999), 51.9),
            (datetime.datetime(2018, 3, 24, 16, 27, 50, 653390), 52.0),
            (datetime.datetime(2018, 3, 24, 16, 27, 53, 749702), 52.0),
            (datetime.datetime(2018, 3, 24, 16, 27, 56, 825660), 52.0),
            ...
            ]
        }

        Return:
        * True if the graph have been generated
        * False if the graph have not been generated
        """
        if data == {}:
            return False

        chart = DateTimeLine(title=title.capitalize(),
                             width=self.width,
                             height=self.height,
                             style=self.style,
                             show_dots=False,
                             legend_at_bottom=True,
                             x_label_rotation=20,
                             x_value_formatter=lambda dt: dt.strftime('%Y/%m/%d %H:%M:%S'))
        # Downsample each time series to (at most) one point per pixel of width
        for k, v in iteritems(time_serie_subsample(data, self.width)):
            chart.add(k, v)
        chart.render_to_file(os.path.join(self.path, title + '.svg'))
        return True
class GlancesActions(object):

    """Trigger user-defined commands when an alert threshold is reached."""

    def __init__(self, args=None):
        """Init GlancesActions class."""
        # Last criticity seen per stat (stat_name -> criticity) so that the
        # same command is not executed twice in a row for the same level.
        self.status = {}

        # Grace period after startup (issue#732): no action may fire before
        # refresh * 2 seconds have elapsed (3 seconds when no args given).
        self.start_timer = Timer(args.time * 2) if hasattr(args, 'time') else Timer(3)

    def get(self, stat_name):
        """Get the stat_name criticity (None if never triggered)."""
        return self.status.get(stat_name)

    def set(self, stat_name, criticity):
        """Set the stat_name to criticity."""
        self.status[stat_name] = criticity

    def run(self, stat_name, criticity, commands, repeat, mustache_dict=None):
        """Run the commands (in background).

        - stat_name: plugin_name (+ header)
        - criticity: criticity of the trigger
        - commands: a list of command lines with optional {{mustache}}
        - repeat: if True, re-run the action even at the same criticity
        - mustache_dict: plugin stats (available within {{mustache}})

        Return True if the commands have been run.
        """
        # Startup grace period not over yet
        if not self.start_timer.finished():
            return False
        # Same criticity as last time and repetition not requested
        if self.get(stat_name) == criticity and not repeat:
            return False

        logger.debug("{} action {} for {} ({}) with stats {}".format(
            "Repeat" if repeat else "Run", commands,
            stat_name, criticity, mustache_dict))

        # Spawn every command without waiting for completion
        for command in commands:
            # Render {{mustache}} placeholders from the stats dict
            rendered = pystache.render(command, mustache_dict) if pystache_tag else command
            logger.info("Action triggered for {} ({}): {}".format(
                stat_name, criticity, rendered))
            logger.debug(
                "Stats value for the trigger: {}".format(mustache_dict))
            try:
                # NOTE(review): shell=True runs the rendered command through
                # the shell — stats values substituted via {{mustache}} end up
                # on that command line; confirm inputs are trusted.
                Popen(rendered, shell=True)
            except OSError as spawn_error:
                logger.error("Can't execute the action ({})".format(spawn_error))

        # Remember the level we just acted on
        self.set(stat_name, criticity)

        return True
class GlancesProcesses(object):

    """Get processed stats using the psutil library."""

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes."""
        # Add internals caches because PSUtil do not cache all the stats
        # See: https://code.google.com/p/psutil/issues/detail?id=462
        self.username_cache = {}
        self.cmdline_cache = {}

        # The internals caches will be cleaned each 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Init the io dict
        # key = pid
        # value = [ read_bytes_old, write_bytes_old ]
        self.io_old = {}

        # Whether or not to enable process tree
        self._enable_tree = False
        self.process_tree = None

        # Init stats
        self.auto_sort = True
        self._sort_key = 'cpu_percent'
        self.allprocesslist = []
        self.processlist = []
        self.reset_processcount()

        # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
        # Default is to enable the processes stats
        self.disable_tag = False

        # Extended stats for top process is enable by default
        self.disable_extended_tag = False

        # Maximum number of processes showed in the UI (None if no limit)
        self._max_processes = None

        # Process filter is a regular expression
        self._filter = GlancesFilter()

        # Whether or not to hide kernel threads
        self.no_kernel_threads = False

        # Store maximums values in a dict
        # Used in the UI to highlight the maximum value
        self._max_values_list = ('cpu_percent', 'memory_percent')
        # { 'cpu_percent': 0.0, 'memory_percent': 0.0 }
        self._max_values = {}
        self.reset_max_values()

    def reset_processcount(self):
        # Reset the global per-status process counters
        self.processcount = {'total': 0,
                             'running': 0,
                             'sleeping': 0,
                             'thread': 0,
                             'pid_max': None}

    def enable(self):
        """Enable process stats."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Enable extended process stats."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    @property
    def pid_max(self):
        """
        Get the maximum PID value.

        On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.

        From `man 5 proc`:
        The default value for this file, 32768, results in the same range of
        PIDs as on earlier kernels. On 32-bit platforms, 32768 is the maximum
        value for pid_max. On 64-bit systems, pid_max can be set to any value
        up to 2^22 (PID_MAX_LIMIT, approximately 4 million).

        If the file is unreadable or not available for whatever reason,
        returns None.

        Some other OSes:
        - On FreeBSD and macOS the maximum is 99999.
        - On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
        - On NetBSD the maximum is 30000.

        :returns: int or None
        """
        if LINUX:
            # XXX: waiting for https://github.com/giampaolo/psutil/issues/720
            try:
                with open('/proc/sys/kernel/pid_max', 'rb') as f:
                    return int(f.read())
            except (OSError, IOError):
                return None
        # Implicitly returns None on non-Linux platforms

    @property
    def max_processes(self):
        """Get the maximum number of processes showed in the UI."""
        return self._max_processes

    @max_processes.setter
    def max_processes(self, value):
        """Set the maximum number of processes showed in the UI."""
        self._max_processes = value

    @property
    def process_filter_input(self):
        """Get the process filter (given by the user)."""
        return self._filter.filter_input

    @property
    def process_filter(self):
        """Get the process filter (current apply filter)."""
        return self._filter.filter

    @process_filter.setter
    def process_filter(self, value):
        """Set the process filter."""
        self._filter.filter = value

    @property
    def process_filter_key(self):
        """Get the process filter key."""
        return self._filter.filter_key

    @property
    def process_filter_re(self):
        """Get the process regular expression compiled."""
        return self._filter.filter_re

    def disable_kernel_threads(self):
        """Ignore kernel threads in process list."""
        self.no_kernel_threads = True

    def enable_tree(self):
        """Enable process tree."""
        self._enable_tree = True

    def is_tree_enabled(self):
        """Return True if process tree is enabled, False instead."""
        return self._enable_tree

    @property
    def sort_reverse(self):
        """Return True to sort processes in reverse 'key' order, False instead."""
        # Alphabetical keys sort ascending; numeric stats sort descending
        if self.sort_key == 'name' or self.sort_key == 'username':
            return False

        return True

    def max_values(self):
        """Return the max values dict."""
        return self._max_values

    def get_max_values(self, key):
        """Get the maximum values of the given stat (key)."""
        return self._max_values[key]

    def set_max_values(self, key, value):
        """Set the maximum value for a specific stat (key)."""
        self._max_values[key] = value

    def reset_max_values(self):
        """Reset the maximum values dict."""
        self._max_values = {}
        for k in self._max_values_list:
            self._max_values[k] = 0.0

    def __get_mandatory_stats(self, proc, procstat):
        """
        Get mandatory_stats: for all processes.
        Needed for the sorting/filter step.

        Stats grabbed inside this method:
        * 'name', 'cpu_times', 'status', 'ppid'
        * 'username', 'cpu_percent', 'memory_percent'

        Returns None when the process vanished or basic stats are missing.
        """
        procstat['mandatory_stats'] = True

        # Name, cpu_times, status and ppid stats are in the same /proc file
        # Optimisation for issue #958
        try:
            procstat.update(proc.as_dict(
                attrs=['name', 'cpu_times', 'status', 'ppid'],
                ad_value=''))
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Try/catch for issue #432 (process no longer exist)
            # Try/catch for issue #1120 (only see on Macos)
            return None
        else:
            # Keep only the first letter of the status, uppercased (e.g. 'R')
            procstat['status'] = str(procstat['status'])[:1].upper()

        try:
            procstat.update(proc.as_dict(
                attrs=['username', 'cpu_percent', 'memory_percent'],
                ad_value=''))
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Try/catch for issue #432 (process no longer exist)
            return None

        if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '':
            # Do not display process if we cannot get the basic
            # cpu_percent or memory_percent stats
            return None

        # Compute the maximum value for cpu_percent and memory_percent
        for k in self._max_values_list:
            if procstat[k] > self.get_max_values(k):
                self.set_max_values(k, procstat[k])

        # Process command line (cached with internal cache)
        if procstat['pid'] not in self.cmdline_cache:
            # Patch for issue #391
            try:
                self.cmdline_cache[procstat['pid']] = proc.cmdline()
            except (AttributeError, EnvironmentError, UnicodeDecodeError,
                    psutil.AccessDenied, psutil.NoSuchProcess):
                self.cmdline_cache[procstat['pid']] = ""
        procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        # Availability: all platforms except macOS and Illumos/Solaris
        try:
            # Get the process IO counters
            proc_io = proc.io_counters()
            io_new = [proc_io.read_bytes, proc_io.write_bytes]
        except (psutil.AccessDenied, psutil.NoSuchProcess, NotImplementedError):
            # Access denied to process IO (no root account)
            # NoSuchProcess (process die between first and second grab)
            # Put 0 in all values (for sort) and io_tag = 0 (for display)
            procstat['io_counters'] = [0, 0] + [0, 0]
            io_tag = 0
        except AttributeError:
            # io_counters() not available on this platform: return as-is,
            # skipping the io_tag append below
            return procstat
        else:
            # For IO rate computation
            # Append saved IO r/w bytes
            try:
                procstat['io_counters'] = io_new + self.io_old[procstat['pid']]
            except KeyError:
                procstat['io_counters'] = io_new + [0, 0]
            # then save the IO r/w bytes
            self.io_old[procstat['pid']] = io_new
            io_tag = 1

        # Append the IO tag (for display)
        procstat['io_counters'] += [io_tag]

        return procstat

    def __get_standard_stats(self, proc, procstat):
        """
        Get standard_stats: only for displayed processes.

        Stats grabbed inside this method:
        * nice and memory_info
        """
        procstat['standard_stats'] = True

        # Process nice and memory_info (issue #926)
        try:
            procstat.update(
                proc.as_dict(attrs=['nice', 'memory_info']))
        except psutil.NoSuchProcess:
            pass

        return procstat

    def __get_extended_stats(self, proc, procstat):
        """
        Get extended stats, only for top processes (see issue #403).

        - cpu_affinity (Linux, Windows, FreeBSD)
        - ionice (Linux and Windows > Vista)
        - memory_full_info (Linux)
        - num_ctx_switches (not available on Illumos/Solaris)
        - num_fds (Unix-like)
        - num_handles (Windows)
        - num_threads (not available on *BSD)
        - memory_maps (only swap, Linux)
          https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
        - connections (TCP and UDP)
        """
        procstat['extended_stats'] = True

        for stat in ['cpu_affinity', 'ionice', 'memory_full_info',
                     'num_ctx_switches', 'num_fds', 'num_handles',
                     'num_threads']:
            try:
                procstat.update(proc.as_dict(attrs=[stat]))
            except psutil.NoSuchProcess:
                pass
            # XXX: psutil>=4.3.1 raises ValueError while <4.3.1 raises AttributeError
            except (ValueError, AttributeError):
                procstat[stat] = None

        if LINUX:
            try:
                # Swap used by the process = sum of the swap of its mappings
                procstat['memory_swap'] = sum([v.swap for v in proc.memory_maps()])
            except psutil.NoSuchProcess:
                pass
            except (psutil.AccessDenied, TypeError, NotImplementedError):
                # NotImplementedError: /proc/${PID}/smaps file doesn't exist
                # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
                # is not enabled (see psutil #533/glances #413).
                # XXX: Remove TypeError once we'll drop psutil < 3.0.0.
                procstat['memory_swap'] = None

        try:
            procstat['tcp'] = len(proc.connections(kind="tcp"))
            procstat['udp'] = len(proc.connections(kind="udp"))
        except psutil.AccessDenied:
            procstat['tcp'] = None
            procstat['udp'] = None

        return procstat

    def __get_process_stats(self, proc,
                            mandatory_stats=True,
                            standard_stats=True,
                            extended_stats=False):
        """Get stats of a running processes.

        Returns None as soon as a tier of stats could not be grabbed
        (process vanished or denied).
        """
        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat = self.__get_mandatory_stats(proc, procstat)

        if procstat is not None and standard_stats:
            procstat = self.__get_standard_stats(proc, procstat)

        if procstat is not None and extended_stats and not self.disable_extended_tag:
            procstat = self.__get_extended_stats(proc, procstat)

        return procstat

    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.reset_processcount()

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Reset the max dict
        self.reset_max_values()

        # Update the maximum process ID (pid) number
        self.processcount['pid_max'] = self.pid_max

        # Build an internal dict with only mandatories stats (sort keys)
        processdict = {}
        excluded_processes = set()
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
                continue

            # If self.max_processes is None: Only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(proc,
                                         mandatory_stats=True,
                                         standard_stats=self.max_processes is None)
            # Check if s is not None (issue #879)
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on macOS
            # waiting for upstream patch from psutil
            if (s is None or
                    BSD and s['name'] == 'idle' or
                    WINDOWS and s['name'] == 'System Idle Process' or
                    MACOS and s['name'] == 'kernel_task'):
                continue
            # Continue to the next process if it has to be filtered
            if self._filter.is_filtered(s):
                excluded_processes.add(proc)
                continue

            # Ok add the process to the list
            processdict[proc] = s
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads,
                                                           excluded_processes)
            for i, node in enumerate(self.process_tree):
                # Only retrieve stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break
                # add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)
                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update
        else:
            # Process optimization
            # Only retrieve stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N
                # (Return a list of tuple)
                # tuple=key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(iteritems(processdict),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {}: {}".format(self.sort_key, e))
                    logger.error('{}'.format(listitems(processdict)[0]))
                    # Fallback to all process (issue #423)
                    processloop = iteritems(processdict)
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    first = True
            else:
                # Get all processes stats
                processloop = iteritems(processdict)
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats but only for TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all processes list used by the AMPs
        self.allprocesslist = [p for p in itervalues(processdict)]

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getalllist(self):
        """Get the allprocesslist."""
        return self.allprocesslist

    def getlist(self, sortedby=None):
        """Get the processlist."""
        # NOTE(review): the sortedby argument is currently unused here
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    @property
    def sort_key(self):
        """Get the current sort key."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, key):
        """Set the current sort key."""
        self._sort_key = key
class GlancesAmp(object):
    """Main class for Glances AMP.

    Subclasses override the class attributes below and implement update().
    """

    NAME = '?'
    VERSION = '?'
    DESCRIPTION = '?'
    AUTHOR = '?'
    EMAIL = '?'

    def __init__(self, name=None, args=None):
        """Init AMP class."""
        logger.debug("AMP - Init {} version {}".format(self.NAME, self.VERSION))

        # AMP name (= module name without glances_)
        if name is None:
            self.amp_name = self.__class__.__module__[len('glances_'):]
        else:
            self.amp_name = name

        # Init the args
        self.args = args

        # Init the configs
        self.configs = {}

        # A timer is needed to only update every refresh seconds
        # Init to 0 in order to update the AMP on startup
        self.timer = Timer(0)

    def load_config(self, config):
        """Load AMP parameters from the configuration file.

        Return True when the AMP ends up enabled, False otherwise.
        """

        # Read AMP configuration.
        # For ex, the AMP foo should have the following section:
        #
        # [foo]
        # enable=true
        # regex=\/usr\/bin\/nginx
        # refresh=60
        #
        # and optionally:
        #
        # one_line=false
        # option1=opt1
        # ...
        #
        amp_section = 'amp_' + self.amp_name
        if (hasattr(config, 'has_section') and
                config.has_section(amp_section)):
            logger.debug("AMP - {}: Load configuration".format(self.NAME))
            for param, _ in config.items(amp_section):
                try:
                    # Numeric options first...
                    self.configs[param] = config.get_float_value(amp_section, param)
                except ValueError:
                    # ...then strings, split on ',' (single values unwrapped)
                    self.configs[param] = config.get_value(amp_section, param).split(',')
                    if len(self.configs[param]) == 1:
                        self.configs[param] = self.configs[param][0]
                logger.debug("AMP - {}: Load parameter: {} = {}".format(self.NAME, param, self.configs[param]))
        else:
            logger.debug("AMP - {}: Can not find section {} in the configuration file".format(self.NAME, self.amp_name))
            return False

        # enable, regex and refresh are mandatories
        # if not configured then AMP is disabled
        if self.enable():
            for k in ['regex', 'refresh']:
                if k not in self.configs:
                    logger.warning("AMP - {}: Can not find configuration key {} in section {}".format(self.NAME, k, self.amp_name))
                    self.configs['enable'] = 'false'
        else:
            logger.debug("AMP - {} is disabled".format(self.NAME))

        # Init the count to 0
        self.configs['count'] = 0

        return self.enable()

    def get(self, key):
        """Generic method to get the item in the AMP configuration"""
        if key in self.configs:
            return self.configs[key]
        else:
            return None

    def enable(self):
        """Return True|False if the AMP is enabled in the configuration file (enable=true|false)."""
        ret = self.get('enable')
        if ret is None:
            return False
        else:
            return ret.lower().startswith('true')

    def regex(self):
        """Return regular expression used to identified the current application."""
        return self.get('regex')

    def refresh(self):
        """Return refresh time in seconds for the current application monitoring process."""
        return self.get('refresh')

    def one_line(self):
        """Return True|False if the AMP should be displayed in one line (one_line=true|false)."""
        ret = self.get('one_line')
        if ret is None:
            return False
        else:
            return ret.lower().startswith('true')

    def time_until_refresh(self):
        """Return time in seconds until refresh."""
        return self.timer.get()

    def should_update(self):
        """Return True if the AMP should be updated:
        - AMP is enabled
        - only update every 'refresh' seconds
        """
        if self.timer.finished():
            # Re-arm the timer for the next 'refresh' interval
            self.timer.set(self.refresh())
            self.timer.reset()
            return self.enable()
        return False

    def set_count(self, count):
        """Set the number of processes matching the regex"""
        self.configs['count'] = count

    def count(self):
        """Get the number of processes matching the regex"""
        return self.get('count')

    def count_min(self):
        """Get the minimum number of processes"""
        return self.get('countmin')

    def count_max(self):
        """Get the maximum number of processes"""
        return self.get('countmax')

    def set_result(self, result, separator=''):
        """Store the result (string) into the result key of the AMP

        if one_line is true then replace \n by separator
        """
        if self.one_line():
            self.configs['result'] = str(result).replace('\n', separator)
        else:
            self.configs['result'] = str(result)

    def result(self):
        """ Return the result of the AMP (as a string)"""
        ret = self.get('result')
        if ret is not None:
            ret = u(ret)
        return ret

    def update_wrapper(self, process_list):
        """Wrapper for the children update"""
        # Set the number of running process
        self.set_count(len(process_list))
        # Call the children update method
        if self.should_update():
            return self.update(process_list)
        else:
            # Not time to refresh yet: return the cached result
            return self.result()
class CpuPercent(object):

    """Get and store the CPU percent."""

    def __init__(self, cached_time=1):
        self.cpu_percent = 0
        self.percpu_percent = []

        # cached_time is the minimum time interval between stats updates
        # since last update is passed (will retrieve old cached info instead)
        self.timer_cpu = Timer(0)
        self.timer_percpu = Timer(0)
        self.cached_time = cached_time

    def get_key(self):
        """Return the key of the per CPU list."""
        return 'cpu_number'

    def get(self, percpu=False):
        """Update and/or return the CPU using the psutil library.

        If percpu, return the percpu stats"""
        return self.__get_percpu() if percpu else self.__get_cpu()

    def __get_cpu(self):
        """Update and/or return the CPU using the psutil library."""
        # Refresh at most once per cached_time; otherwise serve the cache
        if self.timer_cpu.finished():
            self.cpu_percent = psutil.cpu_percent(interval=0.0)
            # Re-arm the cache timer
            self.timer_cpu = Timer(self.cached_time)
        return self.cpu_percent

    def __get_percpu(self):
        """Update and/or return the per CPU list using the psutil library."""
        # Refresh at most once per cached_time; otherwise serve the cache
        if self.timer_percpu.finished():
            self.percpu_percent = []
            for index, times in enumerate(
                    psutil.cpu_times_percent(interval=0.0, percpu=True)):
                entry = {'key': self.get_key(),
                         'cpu_number': index,
                         'total': round(100 - times.idle, 1),
                         'user': times.user,
                         'system': times.system,
                         'idle': times.idle}
                # The following stats are for API purposes only; their
                # availability depends on the platform
                for optional in ('nice', 'iowait', 'irq', 'softirq',
                                 'steal', 'guest', 'guest_nice'):
                    if hasattr(times, optional):
                        entry[optional] = getattr(times, optional)
                self.percpu_percent.append(entry)
            # Re-arm the cache timer
            self.timer_percpu = Timer(self.cached_time)
        return self.percpu_percent
def __update__(self):
    """Refresh the stats, at most once per cached_time seconds."""
    # Serve the cached data until the cache timer expires
    if not self.timer.finished():
        return
    self.stats.update()
    # Re-arm the cache timer
    self.timer = Timer(self.cached_time)
class GlancesInstance(object):

    """All the methods of this class are published as XML-RPC methods."""

    def __init__(self, cached_time=1, config=None):
        # Build the stats object and take an initial snapshot
        self.stats = GlancesStatsServer(config)
        self.stats.update()

        # cached_time is the minimum time interval between stats updates
        # i.e. XML/RPC calls will not retrieve updated info until the time
        # since last update is passed (will retrieve old cached info instead)
        self.timer = Timer(0)
        self.cached_time = cached_time

    def __update__(self):
        # Refresh at most once per cached_time; otherwise serve the cache
        if self.timer.finished():
            self.stats.update()
            self.timer = Timer(self.cached_time)

    def init(self):
        # Return the Glances version
        return version

    def getAll(self):
        # Update and return all the stats
        self.__update__()
        return json.dumps(self.stats.getAll())

    def getAllPlugins(self):
        # Return the plugins list
        return json.dumps(self.stats.getAllPlugins())

    def getAllLimits(self):
        # Return all the plugins limits
        return json.dumps(self.stats.getAllLimitsAsDict())

    def getAllViews(self):
        # Return all the plugins views
        return json.dumps(self.stats.getAllViewsAsDict())

    def getAllMonitored(self):
        # Return the processes monitored list
        # return json.dumps(self.monitors.getAll())
        return json.dumps(self.stats.getAll()['monitor'])

    def __getattr__(self, item):
        """Overwrite the getattr method in case of attribute is not found.

        The goal is to dynamically generate the API get'Stats'() methods.
        """
        # Anything that is not a get* accessor is a genuine missing attribute
        if not item.startswith('get'):
            raise AttributeError(item)
        try:
            # Update the stat, then forward the call to the stats object
            self.__update__()
            return getattr(self.stats, item)
        except Exception:
            # The method is not found for the plugin
            raise AttributeError(item)
class GlancesActions(object):

    """This class manage action if an alert is reached."""

    def __init__(self, args=None):
        """Init GlancesActions class."""
        # Dict with the criticity status
        # - key: stat_name
        # - value: criticity
        # Goal: avoid to execute the same command twice
        self.status = {}

        # Add a timer to avoid any trigger when Glances is started (issue#732)
        # Action can be triggered after refresh * 2 seconds
        if hasattr(args, 'time'):
            self.start_timer = Timer(args.time * 2)
        else:
            self.start_timer = Timer(3)

    def get(self, stat_name):
        """Get the stat_name criticity."""
        try:
            return self.status[stat_name]
        except KeyError:
            # Stat has never triggered an action yet
            return None

    def set(self, stat_name, criticity):
        """Set the stat_name to criticity."""
        self.status[stat_name] = criticity

    def run(self, stat_name, criticity, commands, repeat, mustache_dict=None):
        """Run the commands (in background).

        - stats_name: plugin_name (+ header)
        - criticity: criticity of the trigger
        - commands: a list of command line with optional {{mustache}}
        - repeat: if True, then repeat the action
        - mustache_dict: Plugin stats (can be use within {{mustache}})

        Return True if the commands have been ran.
        """
        if (self.get(stat_name) == criticity and not repeat) or \
           not self.start_timer.finished():
            # Action already executed => Exit
            return False

        logger.debug("{} action {} for {} ({}) with stats {}".format(
            "Repeat" if repeat else "Run", commands,
            stat_name, criticity, mustache_dict))

        # Run all actions in background
        for cmd in commands:
            # Replace {{arg}} by the dict one (Thk to {Mustache})
            if pystache_tag:
                cmd_full = pystache.render(cmd, mustache_dict)
            else:
                cmd_full = cmd
            # Execute the action
            logger.info("Action triggered for {} ({}): {}".format(
                stat_name, criticity, cmd_full))
            logger.debug(
                "Stats value for the trigger: {}".format(mustache_dict))
            try:
                # SECURITY NOTE(review): shell=True executes the rendered
                # command through the shell; stats values substituted via
                # {{mustache}} end up on that command line — confirm the
                # inputs are trusted.
                Popen(cmd_full, shell=True)
            except OSError as e:
                logger.error("Can't execute the action ({})".format(e))

        # Remember the level we just acted on
        self.set(stat_name, criticity)

        return True
class Plugin(GlancesPlugin):
    """Glances ports scanner plugin."""

    def __init__(self, args=None, config=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args, stats_init_value=[])
        self.args = args
        self.config = config
        # We want to display the stat in the curse interface
        self.display_curse = True
        # Init stats: TCP port scans followed by web/URL scans
        self.stats = GlancesPortsList(config=config, args=args).get_ports_list() + \
            GlancesWebList(config=config, args=args).get_web_list()
        # Init global Timer (expired, so the first update starts a scan)
        self.timer_ports = Timer(0)
        # Global Thread running all the scans
        self._thread = None

    def exit(self):
        """Overwrite the exit method to close threads."""
        if self._thread is not None:
            self._thread.stop()
        # Call the father class
        super(Plugin, self).exit()

    @GlancesPlugin._log_result_decorator
    def update(self):
        """Update the ports list.

        Return self.stats (updated in background by the scanner thread).
        """
        if self.input_method == 'local':
            # Only refresh:
            # * if there is not other scanning thread
            # * every refresh seconds (define in the configuration file)
            if self._thread is None:
                thread_is_running = False
            else:
                # Fix: Thread.isAlive() was deprecated and removed in
                # Python 3.9; is_alive() is the supported spelling.
                thread_is_running = self._thread.is_alive()
            if self.timer_ports.finished() and not thread_is_running:
                # Run ports scanner
                self._thread = ThreadScanner(self.stats)
                self._thread.start()
                # Restart timer
                if len(self.stats) > 0:
                    self.timer_ports = Timer(self.stats[0]['refresh'])
                else:
                    self.timer_ports = Timer(0)
        else:
            # Not available in SNMP mode
            pass

        return self.stats

    def get_ports_alert(self, port, header="", log=False):
        """Return the alert status relative to the port scan return value.

        - None status: scan not done yet -> CAREFUL
        - 0: connection timed out -> CRITICAL
        - RTT above rtt_warning -> WARNING
        """
        if port['status'] is None:
            return 'CAREFUL'
        elif port['status'] == 0:
            return 'CRITICAL'
        elif (isinstance(port['status'], (float, int)) and
              port['rtt_warning'] is not None and
              port['status'] > port['rtt_warning']):
            return 'WARNING'
        return 'OK'

    def get_web_alert(self, web, header="", log=False):
        """Return the alert status relative to the web/url scan return value.

        - None status: scan not done yet -> CAREFUL
        - HTTP status outside 200/301/302 -> CRITICAL
        - elapsed time above rtt_warning -> WARNING
        """
        if web['status'] is None:
            return 'CAREFUL'
        elif web['status'] not in [200, 301, 302]:
            return 'CRITICAL'
        elif web['rtt_warning'] is not None and \
                web['elapsed'] > web['rtt_warning']:
            return 'WARNING'
        return 'OK'

    def msg_curse(self, args=None, max_width=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        # Only process if stats exist and display plugin enable...
        ret = []

        if not self.stats or args.disable_ports:
            return ret

        # Max size for the interface name
        name_max_width = max_width - 7

        # Build the string message
        for p in self.stats:
            if 'host' in p:
                if p['host'] is None:
                    status = 'None'
                elif p['status'] is None:
                    status = 'Scanning'
                elif isinstance(p['status'], bool_type) and p['status'] is True:
                    status = 'Open'
                elif p['status'] == 0:
                    status = 'Timeout'
                else:
                    # Convert second to ms
                    status = '{0:.0f}ms'.format(p['status'] * 1000.0)

                msg = '{:{width}}'.format(p['description'][0:name_max_width],
                                          width=name_max_width)
                ret.append(self.curse_add_line(msg))
                msg = '{:>9}'.format(status)
                ret.append(self.curse_add_line(msg, self.get_ports_alert(p)))
                ret.append(self.curse_new_line())
            elif 'url' in p:
                msg = '{:{width}}'.format(p['description'][0:name_max_width],
                                          width=name_max_width)
                ret.append(self.curse_add_line(msg))
                if isinstance(p['status'], numbers.Number):
                    status = 'Code {}'.format(p['status'])
                elif p['status'] is None:
                    status = 'Scanning'
                else:
                    status = p['status']
                msg = '{:>9}'.format(status)
                ret.append(self.curse_add_line(msg, self.get_web_alert(p)))
                ret.append(self.curse_new_line())

        # Delete the last empty line
        try:
            ret.pop()
        except IndexError:
            pass

        return ret
class GlancesProcesses(object):
    """Get processed stats using the psutil library."""

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes.

        cache_timeout: lifetime (seconds) of the internal username/cmdline
        caches before they are flushed.
        """
        # Add internals caches because PSUtil do not cache all the stats
        # See: https://code.google.com/p/psutil/issues/detail?id=462
        self.username_cache = {}
        self.cmdline_cache = {}

        # The internals caches will be cleaned each 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Init the io dict
        # key = pid
        # value = [ read_bytes_old, write_bytes_old ]
        self.io_old = {}

        # Whether or not to enable process tree
        self._enable_tree = False
        self.process_tree = None

        # Init stats
        self.auto_sort = True
        self._sort_key = 'cpu_percent'
        self.allprocesslist = []
        self.processlist = []
        self.reset_processcount()

        # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
        # Default is to enable the processes stats
        self.disable_tag = False

        # Extended stats for top process is enable by default
        self.disable_extended_tag = False

        # Maximum number of processes showed in the UI (None if no limit)
        self._max_processes = None

        # Process filter is a regular expression
        self._filter = GlancesFilter()

        # Whether or not to hide kernel threads
        self.no_kernel_threads = False

        # Store maximums values in a dict
        # Used in the UI to highlight the maximum value
        self._max_values_list = ('cpu_percent', 'memory_percent')
        # { 'cpu_percent': 0.0, 'memory_percent': 0.0 }
        self._max_values = {}
        self.reset_max_values()

    def reset_processcount(self):
        """Reset the global process-count counters to their initial state."""
        self.processcount = {'total': 0,
                             'running': 0,
                             'sleeping': 0,
                             'thread': 0,
                             'pid_max': None}

    def enable(self):
        """Enable process stats."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Enable extended process stats."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    @property
    def pid_max(self):
        """
        Get the maximum PID value.

        On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.

        From `man 5 proc`:
        The default value for this file, 32768, results in the same range of
        PIDs as on earlier kernels. On 32-bit platforms, 32768 is the maximum
        value for pid_max. On 64-bit systems, pid_max can be set to any value
        up to 2^22 (PID_MAX_LIMIT, approximately 4 million).

        If the file is unreadable or not available for whatever reason,
        returns None.

        Some other OSes:
        - On FreeBSD and macOS the maximum is 99999.
        - On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
        - On NetBSD the maximum is 30000.

        :returns: int or None
        """
        if LINUX:
            # XXX: waiting for https://github.com/giampaolo/psutil/issues/720
            try:
                with open('/proc/sys/kernel/pid_max', 'rb') as f:
                    return int(f.read())
            except (OSError, IOError):
                return None

    @property
    def max_processes(self):
        """Get the maximum number of processes showed in the UI."""
        return self._max_processes

    @max_processes.setter
    def max_processes(self, value):
        """Set the maximum number of processes showed in the UI."""
        self._max_processes = value

    @property
    def process_filter_input(self):
        """Get the process filter (given by the user)."""
        return self._filter.filter_input

    @property
    def process_filter(self):
        """Get the process filter (current apply filter)."""
        return self._filter.filter

    @process_filter.setter
    def process_filter(self, value):
        """Set the process filter."""
        self._filter.filter = value

    @property
    def process_filter_key(self):
        """Get the process filter key."""
        return self._filter.filter_key

    @property
    def process_filter_re(self):
        """Get the process regular expression compiled."""
        return self._filter.filter_re

    def disable_kernel_threads(self):
        """Ignore kernel threads in process list."""
        self.no_kernel_threads = True

    def enable_tree(self):
        """Enable process tree."""
        self._enable_tree = True

    def is_tree_enabled(self):
        """Return True if process tree is enabled, False instead."""
        return self._enable_tree

    @property
    def sort_reverse(self):
        """Return True to sort processes in reverse 'key' order, False instead."""
        # Alphabetical keys sort ascending; numeric keys sort descending
        if self.sort_key == 'name' or self.sort_key == 'username':
            return False

        return True

    def max_values(self):
        """Return the max values dict."""
        return self._max_values

    def get_max_values(self, key):
        """Get the maximum values of the given stat (key)."""
        return self._max_values[key]

    def set_max_values(self, key, value):
        """Set the maximum value for a specific stat (key)."""
        self._max_values[key] = value

    def reset_max_values(self):
        """Reset the maximum values dict."""
        self._max_values = {}
        for k in self._max_values_list:
            self._max_values[k] = 0.0

    def __get_mandatory_stats(self, proc, procstat):
        """
        Get mandatory_stats: for all processes.
        Needed for the sorting/filter step.

        Stats grabbed inside this method:
        * 'name', 'cpu_times', 'status', 'ppid'
        * 'username', 'cpu_percent', 'memory_percent'

        Return the updated procstat dict, or None if the process vanished
        or its basic cpu/memory percentages could not be read.
        """
        procstat['mandatory_stats'] = True

        # Name, cpu_times, status and ppid stats are in the same /proc file
        # Optimisation for issue #958
        try:
            procstat.update(
                proc.as_dict(attrs=['name', 'cpu_times', 'status', 'ppid'],
                             ad_value=''))
        except psutil.NoSuchProcess:
            # Try/catch for issue #432 (process no longer exist)
            return None
        else:
            # Keep only the first letter of the status, upper-cased (R/S/...)
            procstat['status'] = str(procstat['status'])[:1].upper()

        try:
            procstat.update(
                proc.as_dict(
                    attrs=['username', 'cpu_percent', 'memory_percent'],
                    ad_value=''))
        except psutil.NoSuchProcess:
            # Try/catch for issue #432 (process no longer exist)
            return None

        if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '':
            # Do not display process if we cannot get the basic
            # cpu_percent or memory_percent stats
            return None

        # Compute the maximum value for cpu_percent and memory_percent
        for k in self._max_values_list:
            if procstat[k] > self.get_max_values(k):
                self.set_max_values(k, procstat[k])

        # Process command line (cached with internal cache)
        if procstat['pid'] not in self.cmdline_cache:
            # Patch for issue #391
            try:
                self.cmdline_cache[procstat['pid']] = proc.cmdline()
            except (AttributeError, UnicodeDecodeError, psutil.AccessDenied,
                    psutil.NoSuchProcess, psutil.WindowsError):
                self.cmdline_cache[procstat['pid']] = ""
        procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        # Availability: all platforms except macOS and Illumos/Solaris
        try:
            # Get the process IO counters
            proc_io = proc.io_counters()
            io_new = [proc_io.read_bytes, proc_io.write_bytes]
        except (psutil.AccessDenied, psutil.NoSuchProcess, NotImplementedError):
            # Access denied to process IO (no root account)
            # NoSuchProcess (process die between first and second grab)
            # Put 0 in all values (for sort) and io_tag = 0 (for display)
            procstat['io_counters'] = [0, 0] + [0, 0]
            io_tag = 0
        except AttributeError:
            # io_counters() not implemented on this platform
            return procstat
        else:
            # For IO rate computation
            # Append saved IO r/w bytes
            try:
                procstat['io_counters'] = io_new + self.io_old[procstat['pid']]
            except KeyError:
                # First grab for this pid: no old values yet
                procstat['io_counters'] = io_new + [0, 0]
            # then save the IO r/w bytes
            self.io_old[procstat['pid']] = io_new
            io_tag = 1

        # Append the IO tag (for display)
        procstat['io_counters'] += [io_tag]

        return procstat

    def __get_standard_stats(self, proc, procstat):
        """
        Get standard_stats: only for displayed processes.

        Stats grabbed inside this method:
        * nice and memory_info
        """
        procstat['standard_stats'] = True

        # Process nice and memory_info (issue #926)
        try:
            procstat.update(proc.as_dict(attrs=['nice', 'memory_info']))
        except psutil.NoSuchProcess:
            pass

        return procstat

    def __get_extended_stats(self, proc, procstat):
        """
        Get extended stats, only for top processes (see issue #403).

        - cpu_affinity (Linux, Windows, FreeBSD)
        - ionice (Linux and Windows > Vista)
        - memory_full_info (Linux)
        - num_ctx_switches (not available on Illumos/Solaris)
        - num_fds (Unix-like)
        - num_handles (Windows)
        - num_threads (not available on *BSD)
        - memory_maps (only swap, Linux)
          https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
        - connections (TCP and UDP)
        """
        procstat['extended_stats'] = True

        # Each stat is fetched individually so a failure on one platform-
        # specific attribute does not lose the others
        for stat in ['cpu_affinity', 'ionice', 'memory_full_info',
                     'num_ctx_switches', 'num_fds', 'num_handles',
                     'num_threads']:
            try:
                procstat.update(proc.as_dict(attrs=[stat]))
            except psutil.NoSuchProcess:
                pass
            # XXX: psutil>=4.3.1 raises ValueError while <4.3.1 raises AttributeError
            except (ValueError, AttributeError):
                procstat[stat] = None

        if LINUX:
            try:
                procstat['memory_swap'] = sum(
                    [v.swap for v in proc.memory_maps()])
            except psutil.NoSuchProcess:
                pass
            except (psutil.AccessDenied, TypeError, NotImplementedError):
                # NotImplementedError: /proc/${PID}/smaps file doesn't exist
                # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option
                # is not enabled (see psutil #533/glances #413).
                # XXX: Remove TypeError once we'll drop psutil < 3.0.0.
                procstat['memory_swap'] = None

        try:
            procstat['tcp'] = len(proc.connections(kind="tcp"))
            procstat['udp'] = len(proc.connections(kind="udp"))
        except psutil.AccessDenied:
            procstat['tcp'] = None
            procstat['udp'] = None

        return procstat

    def __get_process_stats(self, proc,
                            mandatory_stats=True,
                            standard_stats=True,
                            extended_stats=False):
        """Get stats of a running processes.

        proc: a psutil Process object.
        The three flags select which stat tiers are collected; a None
        result from the mandatory tier short-circuits the rest.
        """
        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat = self.__get_mandatory_stats(proc, procstat)
        if procstat is not None and standard_stats:
            procstat = self.__get_standard_stats(proc, procstat)
        if procstat is not None and extended_stats and not self.disable_extended_tag:
            procstat = self.__get_extended_stats(proc, procstat)

        return procstat

    def update(self):
        """Update the processes stats.

        Rebuilds self.processlist / self.allprocesslist / self.processcount
        (and self.process_tree when tree mode is enabled).
        """
        # Reset the stats
        self.processlist = []
        self.reset_processcount()

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Reset the max dict
        self.reset_max_values()

        # Update the maximum process ID (pid) number
        self.processcount['pid_max'] = self.pid_max

        # Build an internal dict with only mandatories stats (sort keys)
        processdict = {}
        excluded_processes = set()
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
                continue

            # If self.max_processes is None: Only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(proc,
                                         mandatory_stats=True,
                                         standard_stats=self.max_processes is None)
            # Check if s is note None (issue #879)
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on macOS
            # waiting for upstream patch from psutil
            if (s is None or
                    BSD and s['name'] == 'idle' or
                    WINDOWS and s['name'] == 'System Idle Process' or
                    MACOS and s['name'] == 'kernel_task'):
                continue
            # Continue to the next process if it has to be filtered
            if self._filter.is_filtered(s):
                excluded_processes.add(proc)
                continue

            # Ok add the process to the list
            processdict[proc] = s
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads,
                                                           excluded_processes)

            for i, node in enumerate(self.process_tree):
                # Only retreive stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break

                # add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)

                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update

        else:
            # Process optimization
            # Only retreive stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N (Return a list of tuple)
                # tuple=key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(iteritems(processdict),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {}: {}".format(
                        self.sort_key, e))
                    logger.error('{}'.format(listitems(processdict)[0]))
                    # Fallback to all process (issue #423)
                    processloop = iteritems(processdict)
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    first = True
            else:
                # Get all processes stats
                processloop = iteritems(processdict)
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats but only for TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all processes list used by the AMPs
        self.allprocesslist = [p for p in itervalues(processdict)]

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getalllist(self):
        """Get the allprocesslist."""
        return self.allprocesslist

    def getlist(self, sortedby=None):
        """Get the processlist.

        NOTE(review): the sortedby argument is currently unused here —
        sorting happens in update(); confirm against callers.
        """
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    @property
    def sort_key(self):
        """Get the current sort key."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, key):
        """Set the current sort key."""
        self._sort_key = key
class CpuPercent(object):
    """Get and store the CPU percent."""

    def __init__(self, cached_time=1):
        self.cpu_percent = 0
        self.percpu_percent = []

        # cached_time is the minimum time interval between stats updates
        # since last update is passed (will retrieve old cached info instead)
        # Start with expired timers so the very first get() does a real read.
        self.timer_cpu = Timer(0)
        self.timer_percpu = Timer(0)
        self.cached_time = cached_time

    def get_key(self):
        """Return the key of the per CPU list."""
        return 'cpu_number'

    def get(self, percpu=False):
        """Update and/or return the CPU using the psutil library.

        If percpu, return the percpu stats.
        """
        return self.__get_percpu() if percpu else self.__get_cpu()

    def __get_cpu(self):
        """Update and/or return the CPU using the psutil library."""
        # Never update more than 1 time per cached_time
        if self.timer_cpu.finished():
            self.cpu_percent = psutil.cpu_percent(interval=0.0)
            # Reset timer for cache
            self.timer_cpu = Timer(self.cached_time)
        return self.cpu_percent

    def __get_percpu(self):
        """Update and/or return the per CPU list using the psutil library."""
        # Never update more than 1 time per cached_time
        if self.timer_percpu.finished():
            self.percpu_percent = []
            per_cpu_times = psutil.cpu_times_percent(interval=0.0, percpu=True)
            for number, times in enumerate(per_cpu_times):
                entry = {
                    'key': self.get_key(),
                    'cpu_number': number,
                    'total': round(100 - times.idle, 1),
                    'user': times.user,
                    'system': times.system,
                    'idle': times.idle,
                }
                # The following stats are for API purposes only; they are
                # platform-dependent, so copy each one only when present.
                for field in ('nice', 'iowait', 'irq', 'softirq',
                              'steal', 'guest', 'guest_nice'):
                    if hasattr(times, field):
                        entry[field] = getattr(times, field)
                # Append new CPU to the list
                self.percpu_percent.append(entry)
            # Reset timer for cache
            self.timer_percpu = Timer(self.cached_time)
        return self.percpu_percent
class Plugin(GlancesPlugin):
    """Glances ports scanner plugin."""

    def __init__(self, args=None, config=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args,
                                     config=config,
                                     stats_init_value=[])
        self.args = args
        self.config = config

        # We want to display the stat in the curse interface
        self.display_curse = True

        # Init stats: TCP port scans followed by web/URL scans
        self.stats = GlancesPortsList(config=config, args=args).get_ports_list() + \
            GlancesWebList(config=config, args=args).get_web_list()

        # Init global Timer (expired, so the first update starts a scan)
        self.timer_ports = Timer(0)

        # Global Thread running all the scans
        self._thread = None

    def exit(self):
        """Overwrite the exit method to close threads."""
        if self._thread is not None:
            self._thread.stop()
        # Call the father class
        super(Plugin, self).exit()

    @GlancesPlugin._log_result_decorator
    def update(self):
        """Update the ports list.

        Return self.stats (updated in background by the scanner thread).
        """
        if self.input_method == 'local':
            # Only refresh:
            # * if there is not other scanning thread
            # * every refresh seconds (define in the configuration file)
            if self._thread is None:
                thread_is_running = False
            else:
                thread_is_running = self._thread.is_alive()
            if self.timer_ports.finished() and not thread_is_running:
                # Run ports scanner
                self._thread = ThreadScanner(self.stats)
                self._thread.start()
                # Restart timer
                if len(self.stats) > 0:
                    self.timer_ports = Timer(self.stats[0]['refresh'])
                else:
                    self.timer_ports = Timer(0)
        else:
            # Not available in SNMP mode
            pass

        return self.stats

    def get_key(self):
        """Return the key of the list."""
        return 'indice'

    def get_ports_alert(self, port, header="", log=False):
        """Return the alert status relative to the port scan return value.

        - None status: scan not done yet -> CAREFUL
        - 0: connection timed out -> CRITICAL
        - RTT above rtt_warning -> WARNING
        Also updates thresholds and triggers actions via the parent class.
        """
        ret = 'OK'
        if port['status'] is None:
            ret = 'CAREFUL'
        elif port['status'] == 0:
            ret = 'CRITICAL'
        elif (isinstance(port['status'], (float, int)) and
              port['rtt_warning'] is not None and
              port['status'] > port['rtt_warning']):
            ret = 'WARNING'

        # Get stat name
        stat_name = self.get_stat_name(header=header)

        # Manage threshold
        self.manage_threshold(stat_name, ret)

        # Manage action
        self.manage_action(stat_name,
                           ret.lower(),
                           header,
                           port[self.get_key()])

        return ret

    def get_web_alert(self, web, header="", log=False):
        """Return the alert status relative to the web/url scan return value.

        - None status: scan not done yet -> CAREFUL
        - HTTP status outside 200/301/302 -> CRITICAL
        - elapsed time above rtt_warning -> WARNING
        Also updates thresholds and triggers actions via the parent class.
        """
        ret = 'OK'
        if web['status'] is None:
            ret = 'CAREFUL'
        elif web['status'] not in [200, 301, 302]:
            ret = 'CRITICAL'
        elif web['rtt_warning'] is not None and \
                web['elapsed'] > web['rtt_warning']:
            ret = 'WARNING'

        # Get stat name
        stat_name = self.get_stat_name(header=header)

        # Manage threshold
        self.manage_threshold(stat_name, ret)

        # Manage action
        self.manage_action(stat_name,
                           ret.lower(),
                           header,
                           web[self.get_key()])

        return ret

    def msg_curse(self, args=None, max_width=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        # Only process if stats exist and display plugin enable...
        ret = []

        if not self.stats or args.disable_ports:
            return ret

        # Max size for the interface name
        name_max_width = max_width - 7

        # Build the string message
        for p in self.stats:
            if 'host' in p:
                # TCP port scan entry
                if p['host'] is None:
                    status = 'None'
                elif p['status'] is None:
                    status = 'Scanning'
                elif isinstance(p['status'], bool_type) and p['status'] is True:
                    status = 'Open'
                elif p['status'] == 0:
                    status = 'Timeout'
                else:
                    # Convert second to ms
                    status = '{0:.0f}ms'.format(p['status'] * 1000.0)

                msg = '{:{width}}'.format(p['description'][0:name_max_width],
                                          width=name_max_width)
                ret.append(self.curse_add_line(msg))
                msg = '{:>9}'.format(status)
                ret.append(self.curse_add_line(msg,
                                               self.get_ports_alert(p,
                                                                    header=p['indice'] + '_rtt')))
                ret.append(self.curse_new_line())
            elif 'url' in p:
                # Web/URL scan entry
                msg = '{:{width}}'.format(p['description'][0:name_max_width],
                                          width=name_max_width)
                ret.append(self.curse_add_line(msg))
                if isinstance(p['status'], numbers.Number):
                    status = 'Code {}'.format(p['status'])
                elif p['status'] is None:
                    status = 'Scanning'
                else:
                    status = p['status']
                msg = '{:>9}'.format(status)
                ret.append(self.curse_add_line(msg,
                                               self.get_web_alert(p,
                                                                  header=p['indice'] + '_rtt')))
                ret.append(self.curse_new_line())

        # Delete the last empty line
        try:
            ret.pop()
        except IndexError:
            pass

        return ret