def update(self, servers_list):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    servers_list: Dict of dict with servers stats
    """
    # Flush display
    logger.debug("Servers list: {}".format(servers_list))
    self.flush(servers_list)

    # Wait
    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(servers_list)
        # Is it an exit or select server key?
        exitkey = pressedkey == ord("\x1b") or pressedkey == ord("q") or pressedkey == 10
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(servers_list)
        # Wait 100ms...
        self.wait()

    return self.active_server
def update(self):
    """Update the command result attribute."""
    # Get the current processes list (once)
    processlist = glances_processes.getlist()

    # Iter upon the AMPs dict
    for k, v in iteritems(self.get()):
        if not v.enable():
            # Do not update if the enable tag is not set
            continue
        amps_list = self._build_amps_list(v, processlist)
        if len(amps_list) > 0:
            # At least one process is matching the regex
            logger.debug("AMPS: {} processes {} detected ({})".format(len(amps_list), k, amps_list))
            # Call the AMP update method
            thread = threading.Thread(target=v.update_wrapper, args=[amps_list])
            thread.start()
        else:
            # Set the process number to 0
            v.set_count(0)
            if v.count_min() is not None and v.count_min() > 0:
                # Only display the "No running process" message if countmin is defined
                v.set_result("No running process")

    return self.__amps_dict
def _build_amps_list(self, amp_value, processlist):
    """Return the AMPs process list according to the amp_value.

    Search application monitored processes by a regular expression.
    """
    ret = []
    try:
        # Search in both cmdline and name (for kernel threads, see #1261)
        for p in processlist:
            add_it = False
            if re.search(amp_value.regex(), p['name']) is not None:
                add_it = True
            else:
                for c in p['cmdline']:
                    if re.search(amp_value.regex(), c) is not None:
                        add_it = True
                        break
            if add_it:
                ret.append({'pid': p['pid'],
                            'cpu_percent': p['cpu_percent'],
                            'memory_percent': p['memory_percent']})
    except (TypeError, KeyError) as e:
        logger.debug("Cannot build AMPS list ({})".format(e))

    return ret
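# A self-contained sketch of the matching rule implemented above: a process
# is selected when the AMP regex matches its name *or* any of its cmdline
# items. The helper, sample process dict and pattern are illustrative only,
# not part of the original source.
import re

def matches_amp(regex, proc):
    """Return True if `regex` matches the process name or any cmdline item."""
    if re.search(regex, proc['name']) is not None:
        return True
    return any(re.search(regex, c) is not None for c in proc['cmdline'])

sample = {'name': 'kworker', 'cmdline': ['/usr/bin/python', 'myscript.py']}
assert matches_amp('.*python.*', sample)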
def _port_scan_tcp(self, port):
    """Scan the (TCP) port structure (dict) and update the status key."""
    ret = None

    # Create and configure the scanning socket
    try:
        socket.setdefaulttimeout(port['timeout'])
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except Exception as e:
        logger.debug("{0}: Error while creating scanning socket ({1})".format(self.plugin_name, e))
        # Without a socket there is nothing to scan
        return ret

    # Scan port
    ip = self._resolv_name(port['host'])
    counter = Counter()
    try:
        ret = _socket.connect_ex((ip, int(port['port'])))
    except Exception as e:
        logger.debug("{0}: Error while scanning port {1} ({2})".format(self.plugin_name, port, e))
    else:
        if ret == 0:
            port['status'] = counter.get()
        else:
            port['status'] = False
    finally:
        _socket.close()

    return ret
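# Hedged usage sketch of the connect_ex() semantics relied on above: it
# returns 0 when the TCP handshake succeeds and an errno otherwise, instead
# of raising. Host, port and timeout below are placeholders.
import socket

def tcp_port_open(host, port, timeout=3.0):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        return s.connect_ex((host, port)) == 0  # 0 means the port answered
    finally:
        s.close()

# Example (placeholder address):
# print(tcp_port_open('127.0.0.1', 80))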
def __set_folder_list(self, section):
    """Init the monitored folder list.

    The list is defined in the Glances configuration file.
    """
    for l in range(1, self.__folder_list_max_size + 1):
        value = {}
        key = 'folder_' + str(l) + '_'

        # Path is mandatory
        value['indice'] = str(l)
        value['path'] = self.config.get_value(section, key + 'path')
        if value['path'] is None:
            continue
        else:
            value['path'] = nativestr(value['path'])

        # Optional conf keys
        for i in ['careful', 'warning', 'critical']:
            # Read threshold
            value[i] = self.config.get_value(section, key + i)
            if value[i] is not None:
                logger.debug("{} threshold for folder {} is {}".format(i, value["path"], value[i]))
            # Read action
            action = self.config.get_value(section, key + i + '_action')
            if action is not None:
                value[i + '_action'] = action
                logger.debug("{} action for folder {} is {}".format(i, value["path"], value[i + '_action']))

        # Add the item to the list
        self.__folder_list.append(value)
def _api_all(self):
    """Glances API RESTful implementation.

    Return the JSON representation of all the plugins.
    HTTP/200 if OK
    HTTP/400 if plugin is not found
    HTTP/404 for other errors
    """
    response.content_type = 'application/json; charset=utf-8'

    if self.args.debug:
        fname = os.path.join(tempfile.gettempdir(), 'glances-debug.json')
        try:
            with open(fname) as f:
                return f.read()
        except IOError:
            logger.debug("Debug file (%s) not found" % fname)

    # Update the stat
    self.__update__()

    try:
        # Get the JSON value of the stat ID
        statval = json.dumps(self.stats.getAllAsDict())
    except Exception as e:
        abort(404, "Cannot get stats (%s)" % str(e))

    return statval
def load_exports(self, args=None):
    """Load all export modules in the 'exports' folder."""
    if args is None:
        return False
    header = "glances_"

    # Transform the arguments list into a dict
    # The aim is to check if the export module should be loaded
    args_var = vars(locals()['args'])

    for item in os.listdir(exports_path):
        export_name = os.path.basename(item)[len(header):-3].lower()
        if (item.startswith(header) and
                item.endswith(".py") and
                item != (header + "export.py") and
                item != (header + "history.py") and
                args_var['export_' + export_name] is not None and
                args_var['export_' + export_name] is not False):
            # Import the export module
            export_module = __import__(os.path.basename(item)[:-3])
            # Add the export to the dictionary
            # The key is the module name
            # for example, the file glances_xxx.py
            # generates self._exports_list["xxx"] = ...
            self._exports[export_name] = export_module.Export(args=args, config=self.config)

    # Log plugins list
    logger.debug("Available exports modules list: {}".format(self.getExportList()))
    return True
def wrapper(*args, **kw):
    ret = fct(*args, **kw)
    logger.debug("%s %s %s return %s" % (
        args[0].__class__.__name__,
        args[0].__class__.__module__[len('glances_'):],
        fct.__name__,
        ret))
    return ret
def update(self):
    """Update IP stats using the input method.

    Stats is a dict.
    """
    # Reset stats
    self.reset()

    if self.input_method == 'local' and netifaces_tag:
        # Update stats using the netifaces lib
        try:
            default_gw = netifaces.gateways()['default'][netifaces.AF_INET]
        except (KeyError, AttributeError) as e:
            logger.debug("Cannot grab the default gateway ({0})".format(e))
        else:
            try:
                self.stats['address'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['addr']
                self.stats['mask'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['netmask']
                self.stats['mask_cidr'] = self.ip_to_cidr(self.stats['mask'])
                self.stats['gateway'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
            except (KeyError, AttributeError) as e:
                logger.debug("Cannot grab IP information: {0}".format(e))
    elif self.input_method == 'snmp':
        # Not implemented yet
        pass

    # Update the view
    self.update_views()

    return self.stats
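# Hedged sketch of the netifaces lookups chained above (requires the
# netifaces lib; the returned shapes follow its documentation, concrete
# values depend on the host). The helper name is illustrative.
import netifaces

def default_ipv4():
    # ('192.168.0.254', 'eth0') -> (gateway IP, interface name)
    gw_ip, iface = netifaces.gateways()['default'][netifaces.AF_INET]
    # [{'addr': '192.168.0.10', 'netmask': '255.255.255.0', ...}]
    addr = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]
    return {'address': addr['addr'], 'mask': addr['netmask'], 'gateway': gw_ip}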
def load_config(self, config):
    """Load the outputs section of the configuration file."""
    # Load the theme
    if config is not None and config.has_section('outputs'):
        logger.debug('Read the outputs section in the configuration file')
        self.theme['name'] = config.get_value('outputs', 'curse_theme', default='black')
        logger.debug('Theme for the curse interface: {}'.format(self.theme['name']))
def login(self):
    """Logon to the server."""
    if self.args.snmp_force:
        # Force SNMP instead of Glances server
        self.client_mode = 'snmp'
    else:
        # First of all, trying to connect to a Glances server
        if not self._login_glances():
            return False

    # Try SNMP mode
    if self.client_mode == 'snmp':
        if not self._login_snmp():
            return False

    # Load limits from the configuration file
    # Each client can choose its own limits
    logger.debug("Load limits from the client configuration file")
    self.stats.load_limits(self.config)

    # Init screen
    self.screen = GlancesCursesClient(config=self.config, args=self.args)

    # Return True: OK
    return True
def load_conf(self, section="opentsdb"):
    """Load the OpenTSDB configuration in the Glances configuration file."""
    if self.config is None:
        return False
    try:
        self.host = self.config.get_value(section, 'host')
        self.port = self.config.get_value(section, 'port')
    except NoSectionError:
        logger.critical("No OpenTSDB configuration found")
        return False
    except NoOptionError as e:
        logger.critical("Error in the OpenTSDB configuration (%s)" % e)
        return False
    else:
        logger.debug("Load OpenTSDB from the Glances configuration file")

    # Prefix is optional
    try:
        self.prefix = self.config.get_value(section, 'prefix')
    except NoOptionError:
        pass

    # Tags are optional, comma-separated key:value pairs.
    try:
        self.tags = self.config.get_value(section, 'tags')
    except NoOptionError:
        pass

    return True
def update(self):
    """Update the command result attribute."""
    # Search application monitored processes by a regular expression
    processlist = glances_processes.getalllist()

    # Iter upon the AMPs dict
    for k, v in iteritems(self.get()):
        if not v.enable():
            # Do not update if the enable tag is not set
            continue
        try:
            amps_list = [p for p in processlist
                         for c in p['cmdline']
                         if re.search(v.regex(), c) is not None]
        except TypeError:
            continue
        if len(amps_list) > 0:
            # At least one process is matching the regex
            logger.debug("AMPS: {} process detected (PID={})".format(k, amps_list[0]['pid']))
            # Call the AMP update method
            thread = threading.Thread(target=v.update_wrapper, args=[amps_list])
            thread.start()
        else:
            # Set the process number to 0
            v.set_count(0)
            if v.count_min() is not None and v.count_min() > 0:
                # Only display the "No running process" message if countmin is defined
                v.set_result("No running process")

    return self.__amps_dict
def export(self, name, columns, points):
    """Write the points to the Prometheus exporter using Gauge."""
    logger.debug("Export {} stats to Prometheus exporter".format(name))

    # Remove non-number stats and convert all to float (for Boolean)
    data = {k: float(v) for (k, v) in iteritems(dict(zip(columns, points))) if isinstance(v, Number)}

    # Write metrics to the Prometheus exporter
    for k, v in iteritems(data):
        # Prometheus metric name: prefix_<glances stats name>
        metric_name = self.prefix + self.METRIC_SEPARATOR + name + self.METRIC_SEPARATOR + k
        # Prometheus is very sensitive to the metric name
        # See: https://prometheus.io/docs/practices/naming/
        for c in ['.', '-', '/', ' ']:
            metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
        # Get the labels
        labels = self.parse_tags(self.labels)
        # Manage an internal dict between metric name and Gauge
        if metric_name not in self._metric_dict:
            self._metric_dict[metric_name] = Gauge(metric_name, k, labelnames=listkeys(labels))
        # Write the value
        if hasattr(self._metric_dict[metric_name], 'labels'):
            # Add the labels (see issue #1255)
            self._metric_dict[metric_name].labels(**labels).set(v)
        else:
            self._metric_dict[metric_name].set(v)
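# A minimal, self-contained sketch of the metric-name sanitization performed
# above. Prometheus metric names are restricted to [a-zA-Z0-9_:], so '.',
# '-', '/' and ' ' are mapped to the separator. The prefix, helper name and
# sample stat are illustrative assumptions, not part of the original code.
METRIC_SEPARATOR = '_'

def prometheus_metric_name(prefix, plugin, stat):
    name = METRIC_SEPARATOR.join([prefix, plugin, stat])
    for c in ['.', '-', '/', ' ']:
        name = name.replace(c, METRIC_SEPARATOR)
    return name

assert prometheus_metric_name('glances', 'fs', '/dev/sda1.size') == 'glances_fs__dev_sda1_size'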
def __catch_key(self, servers_list):
    # Catch the browser pressed key
    self.pressedkey = self.get_key(self.term_window)

    if self.pressedkey != -1:
        logger.debug("Key pressed. Code=%s" % self.pressedkey)

    # Actions...
    if self.pressedkey == ord('\x1b') or self.pressedkey == ord('q'):
        # 'ESC'|'q' > Quit
        self.end()
        logger.info("Stop Glances client browser")
        sys.exit(0)
    elif self.pressedkey == 10:
        # 'ENTER' > Run Glances on the selected server
        logger.debug("Server number {} selected".format(self.cursor + 1))
        self.active_server = self.cursor
    elif self.pressedkey == curses.KEY_UP:
        # 'UP' > Up in the server list
        self.cursor_up(servers_list)
    elif self.pressedkey == curses.KEY_DOWN:
        # 'DOWN' > Down in the server list
        self.cursor_down(servers_list)

    # Return the key code
    return self.pressedkey
def __serve_forever(self):
    """Main loop for the CLI.

    Return True if we should continue (no exit key has been pressed).
    """
    # Start a counter used to compute the time needed
    # to update and export the stats
    counter = Counter()

    # Update stats
    self.stats.update()
    logger.debug('Stats updated in {} seconds'.format(counter.get()))

    # Export stats
    counter_export = Counter()
    self.stats.export(self.stats)
    logger.debug('Stats exported in {} seconds'.format(counter_export.get()))

    # Display stats
    # and wait refresh_time - counter
    if not self.quiet:
        # The update function returns True if an exit key ('q' or 'ESC')
        # has been pressed.
        ret = not self.screen.update(self.stats, duration=self.refresh_time - counter.get())
    else:
        # Nothing is displayed
        # Break should be done via a signal (CTRL-C)
        time.sleep(self.refresh_time - counter.get())
        ret = True

    return ret
def __init__(self, config=None, args=None, timeout=7, return_to_browser=False):
    # Store the arg/config
    self.args = args
    self.config = config

    # Default client mode
    self._client_mode = 'glances'

    # Return to browser or exit
    self.return_to_browser = return_to_browser

    # Build the URI
    if args.password != "":
        self.uri = 'http://{}:{}@{}:{}'.format(args.username, args.password, args.client, args.port)
    else:
        self.uri = 'http://{}:{}'.format(args.client, args.port)
    logger.debug("Try to connect to {}".format(self.uri))

    # Try to connect to the URI
    transport = GlancesClientTransport()
    # Configure the server timeout
    transport.set_timeout(timeout)
    try:
        self.client = ServerProxy(self.uri, transport=transport)
    except Exception as e:
        self.log_and_exit("Client couldn't create socket {}: {}".format(self.uri, e))
def load_exports(self, args=None):
    """Load all export modules in the 'exports' folder."""
    if args is None:
        return False
    header = "glances_"

    # Build the available export modules list
    args_var = vars(locals()['args'])
    for item in os.listdir(exports_path):
        export_name = os.path.basename(item)[len(header):-3].lower()
        if (item.startswith(header) and
                item.endswith(".py") and
                item != (header + "export.py") and
                item != (header + "history.py")):
            self._exports_all[export_name] = os.path.basename(item)[:-3]
            # Set the export_<name> argument to False by default
            setattr(self.args, 'export_' + export_name,
                    getattr(self.args, 'export_' + export_name, False))

    # The aim is to check if the export module should be loaded
    for export_name in self._exports_all:
        if getattr(self.args, 'export_' + export_name, False):
            # Import the export module
            export_module = __import__(self._exports_all[export_name])
            # Add the export to the dictionary
            # The key is the module name
            # for example, the file glances_xxx.py
            # generates self._exports_list["xxx"] = ...
            self._exports[export_name] = export_module.Export(args=args, config=self.config)
            self._exports_all[export_name] = self._exports[export_name]

    # Log plugins list
    logger.debug("Active exports modules list: {}".format(self.getExportsList()))
    return True
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    logger.debug("Export {} stats to InfluxDB".format(name))

    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name

    # Create DB input
    if self.version == INFLUXDB_08:
        data = [{'name': name, 'columns': columns, 'points': [points]}]
    else:
        # Convert all int to float (mandatory for InfluxDB > 0.9.2)
        # Correct issue #750 and issue #749
        for i, _ in enumerate(points):
            try:
                points[i] = float(points[i])
            except (TypeError, ValueError) as e:
                logger.debug("InfluxDB error during stat conversion %s=%s (%s)" % (columns[i], points[i], e))
        data = [{'measurement': name,
                 'tags': self.parse_tags(self.tags),
                 'fields': dict(zip(columns, points))}]

    # Write input to the InfluxDB database
    try:
        self.client.write_points(data)
    except Exception as e:
        logger.error("Cannot export {} stats to InfluxDB ({})".format(name, e))
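# For reference, a sketch of the two payload shapes built above (the values
# are illustrative). The 0.8 API took name/columns/points rows; the newer
# API expects measurement/tags/fields.
legacy_data = [{'name': 'cpu',
                'columns': ['user', 'system'],
                'points': [[1.5, 0.5]]}]
modern_data = [{'measurement': 'cpu',
                'tags': {'host': 'myhost'},
                'fields': {'user': 1.5, 'system': 0.5}}]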
def __set_folder_list(self, section):
    """Init the monitored folder list.

    The list is defined in the Glances configuration file.
    """
    for l in range(1, self.__folder_list_max_size + 1):
        value = {}
        key = 'folder_' + str(l) + '_'

        # Path is mandatory
        try:
            value['path'] = self.config.get_value(section, key + 'path')
        except Exception as e:
            logger.error("Cannot read folder list: {0}".format(e))
            continue
        if value['path'] is None:
            continue

        # Optional conf keys
        for i in ['careful', 'warning', 'critical']:
            try:
                value[i] = self.config.get_value(section, key + i)
            except Exception:
                value[i] = None
                logger.debug("No {0} threshold for folder {1}".format(i, value["path"]))

        # Add the item to the list
        self.__folder_list.append(value)
def load_config(self, config):
    """Load the outputs section of the configuration file."""
    # Limit the number of processes to display in the WebUI
    if config is not None and config.has_section('outputs'):
        logger.debug('Read number of processes to display in the WebUI')
        n = config.get_value('outputs', 'max_processes_display', default=None)
        logger.debug('Number of processes to display in the WebUI: {}'.format(n))
def export(self, name, columns, points):
    """Write the points to the ZeroMQ server."""
    logger.debug("Export {} stats to ZeroMQ".format(name))

    # Create DB input
    data = dict(zip(columns, points))

    # Do not publish empty stats
    if data == {}:
        return False

    # Glances envelopes the stats in a publish message with three frames:
    # - First frame containing the following prefix (STRING)
    # - Second frame with the Glances plugin name (STRING)
    # - Third frame with the Glances plugin stats (JSON)
    message = [b(self.prefix),
               b(name),
               asbytes(json.dumps(data))]

    # Write data to the ZeroMQ bus
    # Result can be viewed on: tcp://host:port
    try:
        self.client.send_multipart(message)
    except Exception as e:
        logger.error("Cannot export {} stats to ZeroMQ ({})".format(name, e))

    return True
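# Hedged companion sketch: a subscriber consuming the three-frame message
# published above. The endpoint and subscription prefix are placeholders,
# not values from the original source.
import json
import zmq

context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.connect('tcp://localhost:5678')  # placeholder endpoint
subscriber.setsockopt(zmq.SUBSCRIBE, b'G')  # placeholder prefix filter

# Blocking receive; frames mirror [prefix, plugin name, JSON stats]:
# prefix, name, raw = subscriber.recv_multipart()
# stats = json.loads(raw.decode('utf-8'))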
def filter(self, value):
    """Set the filter (as a string) and compute the regular expression.

    A filter could be one of the following:
    - python > Process name or cmd line starts with python
    - .*python.* > Process name or cmd line contains python
    - username:nicolargo > Process of the nicolargo user
    """
    self._filter_input = value
    if value is None:
        self._filter = None
        self._filter_key = None
    else:
        new_filter = value.split(':')
        if len(new_filter) == 1:
            self._filter = new_filter[0]
            self._filter_key = None
        else:
            self._filter = new_filter[1]
            self._filter_key = new_filter[0]

    self._filter_re = None
    if self.filter is not None:
        logger.info("Set filter to {} on key {}".format(self.filter, self.filter_key))
        # Compute the regular expression
        try:
            self._filter_re = re.compile(self.filter)
            logger.debug("Filter regex compilation OK: {}".format(self.filter))
        except Exception as e:
            logger.error("Cannot compile filter regex: {} ({})".format(self.filter, e))
            self._filter = None
            self._filter_re = None
            self._filter_key = None
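# Minimal sketch of the "key:regex" convention parsed above; split_filter is
# an illustrative helper, not part of the original class.
def split_filter(value):
    """Return a (key, regex) tuple; key is None for a plain regex filter."""
    parts = value.split(':')
    if len(parts) == 1:
        return None, parts[0]
    return parts[0], parts[1]

assert split_filter('.*python.*') == (None, '.*python.*')
assert split_filter('username:nicolargo') == ('username', 'nicolargo')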
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    stats: Dict of dict with servers stats
    """
    # Flush display
    logger.debug('Servers list: {}'.format(stats))
    self.flush(stats)

    # Wait
    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(stats)
        # Is it an exit or select server key?
        exitkey = (pressedkey == ord('\x1b') or
                   pressedkey == ord('q') or
                   pressedkey == 10)
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(stats)
        # Wait 100ms...
        self.wait()

    return self.active_server
def update(self, process_list):
    """Update the AMP."""
    # Get the service status
    logger.debug('{}: Update stats using service {}'.format(self.NAME, self.get('service_cmd')))
    try:
        res = check_output(self.get('service_cmd').split(), stderr=STDOUT).decode('utf-8')
    except OSError as e:
        logger.debug('{}: Error while executing service ({})'.format(self.NAME, e))
    else:
        status = {'running': 0, 'stopped': 0, 'upstart': 0}
        # For each line
        for r in res.split('\n'):
            # Split on whitespace
            l = r.split()
            if len(l) < 4:
                continue
            if l[1] == '+':
                status['running'] += 1
            elif l[1] == '-':
                status['stopped'] += 1
            elif l[1] == '?':
                status['upstart'] += 1
        # Build the output (string) message
        output = 'Services\n'
        for k, v in iteritems(status):
            output += '{}: {}\n'.format(k, v)
        self.set_result(output, separator=' ')

    return self.result()
def __update__(self):
    """Update the stats."""
    # Reset the list
    self.reset()

    if self.initok:
        for chip in sensors.iter_detected_chips():
            for feature in chip:
                sensors_current = {}
                if feature.name.startswith(b'temp'):
                    # Temperature sensor
                    sensors_current['unit'] = SENSOR_TEMP_UNIT
                elif feature.name.startswith(b'fan'):
                    # Fan speed sensor
                    sensors_current['unit'] = SENSOR_FAN_UNIT
                if sensors_current:
                    try:
                        sensors_current['label'] = feature.label
                        sensors_current['value'] = int(feature.get_value())
                    except SensorsError as e:
                        logger.debug("Cannot grab sensor stat (%s)" % e)
                    else:
                        self.sensors_list.append(sensors_current)

    return self.sensors_list
def init(self):
    """Init the connection to the Cassandra server."""
    if not self.export_enable:
        return None

    # Cluster
    try:
        cluster = Cluster([self.host],
                          port=int(self.port),
                          protocol_version=int(self.protocol_version))
        session = cluster.connect()
    except Exception as e:
        logger.critical("Cannot connect to Cassandra cluster '%s:%s' (%s)" % (self.host, self.port, e))
        sys.exit(2)

    # Keyspace
    try:
        session.set_keyspace(self.keyspace)
    except InvalidRequest as e:
        logger.info("Create keyspace {} on the Cassandra cluster".format(self.keyspace))
        c = "CREATE KEYSPACE %s WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '%s' }" % (self.keyspace, self.replication_factor)
        session.execute(c)
        session.set_keyspace(self.keyspace)

    logger.info(
        "Stats will be exported to Cassandra cluster {} ({}) in keyspace {}".format(
            cluster.metadata.cluster_name, cluster.metadata.all_hosts(), self.keyspace))

    # Table
    try:
        session.execute("CREATE TABLE %s (plugin text, time timeuuid, stat map<text,float>, PRIMARY KEY (plugin, time)) WITH CLUSTERING ORDER BY (time DESC)" % self.table)
    except Exception:
        logger.debug("Cassandra table %s already exists" % self.table)

    return cluster, session
def update(self):
    """Update RAID stats using the input method."""
    # Init new stats
    stats = self.get_init_value()

    if import_error_tag:
        return self.stats

    if self.input_method == 'local':
        # Update stats using the pymdstat lib (https://github.com/nicolargo/pymdstat)
        try:
            # Just for test
            # mds = MdStat(path='/home/nicolargo/dev/pymdstat/tests/mdstat.10')
            mds = MdStat()
            stats = mds.get_stats()['arrays']
        except Exception as e:
            logger.debug("Cannot grab RAID stats (%s)" % e)
            return self.stats
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # No standard way for the moment...
        pass

    # Update the stats
    self.stats = stats

    return self.stats
def load_limits(self, config):
    """Load limits from the configuration file, if it exists."""
    # By default set the history length to 3 points per second during one day
    self._limits['history_size'] = 28800

    if not hasattr(config, 'has_section'):
        return False

    # Read the global section
    if config.has_section('global'):
        self._limits['history_size'] = config.get_float_value('global', 'history_size', default=28800)
        logger.debug("Load configuration key: {0} = {1}".format('history_size', self._limits['history_size']))

    # Read the plugin specific section
    if config.has_section(self.plugin_name):
        for level, _ in config.items(self.plugin_name):
            # Read limits
            limit = '_'.join([self.plugin_name, level])
            try:
                self._limits[limit] = config.get_float_value(self.plugin_name, level)
            except ValueError:
                self._limits[limit] = config.get_value(self.plugin_name, level).split(",")
            logger.debug("Load limit: {} = {}".format(limit, self._limits[limit]))

    return True
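# A small sketch of the limit-key naming scheme produced above: each plugin
# section contributes '<plugin>_<level>' entries. The plugin name and the
# config dict below are illustrative, not from the original source.
plugin_name = 'cpu'
config_items = {'careful': '50', 'warning': '70', 'critical': '90'}
limits = {'_'.join([plugin_name, level]): float(value)
          for level, value in config_items.items()}
# -> {'cpu_careful': 50.0, 'cpu_warning': 70.0, 'cpu_critical': 90.0}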
def update(self, process_list):
    """Update the AMP."""
    # Get the systemctl status
    logger.debug('{}: Update stats using systemctl {}'.format(self.NAME, self.get('systemctl_cmd')))
    try:
        res = check_output(self.get('systemctl_cmd').split())
    except OSError as e:
        logger.debug('{}: Error while executing systemctl ({})'.format(self.NAME, e))
    else:
        status = {}
        # For each line
        for r in res.split('\n')[1:-8]:
            # Split on whitespace
            l = r.split()
            if len(l) > 3:
                # Load column
                for c in range(1, 3):
                    try:
                        status[l[c]] += 1
                    except KeyError:
                        status[l[c]] = 1
        # Build the output (string) message
        output = 'Services\n'
        for k, v in iteritems(status):
            output += '{}: {}\n'.format(k, v)
        self.set_result(output, separator=' ')

    return self.result()
def __display_secondline(self, stat_display, stats):
    """Display the second line in the Curses interface.

    <QUICKLOOK> + CPU|PERCPU + <GPU> + MEM + SWAP + LOAD
    """
    self.init_column()
    self.new_line()

    # Init quicklook
    stat_display['quicklook'] = {'msgdict': []}

    # Dict for plugins width
    plugin_widths = {'quicklook': 0}
    for p in ['cpu', 'gpu', 'mem', 'memswap', 'load']:
        plugin_widths[p] = self.get_stats_display_width(stat_display[p]) if hasattr(self.args, 'disable_' + p) and p in stat_display else 0

    # Width of all plugins
    stats_width = sum(itervalues(plugin_widths))

    # Number of plugins but quicklook
    stats_number = (int(not self.args.disable_cpu and stat_display["cpu"]['msgdict'] != []) +
                    int(not self.args.disable_gpu and stat_display["gpu"]['msgdict'] != []) +
                    int(not self.args.disable_mem and stat_display["mem"]['msgdict'] != []) +
                    int(not self.args.disable_memswap and stat_display["memswap"]['msgdict'] != []) +
                    int(not self.args.disable_load and stat_display["load"]['msgdict'] != []))

    if not self.args.disable_quicklook:
        # Quick look is in the place !
        if self.args.full_quicklook:
            quicklook_width = self.screen.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column)
        else:
            quicklook_width = min(self.screen.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column), 79)
        try:
            stat_display["quicklook"] = stats.get_plugin('quicklook').get_stats_display(max_width=quicklook_width, args=self.args)
        except AttributeError as e:
            logger.debug("Quicklook plugin not available (%s)" % e)
        else:
            plugin_widths['quicklook'] = self.get_stats_display_width(stat_display["quicklook"])
            stats_width = sum(itervalues(plugin_widths)) + 1
        self.space_between_column = 1
        self.display_plugin(stat_display["quicklook"])
        self.new_column()

    # Compute spaces between plugins
    # Note: Only one space between Quicklook and others
    plugin_display_optional = {}
    for p in ['cpu', 'gpu', 'mem', 'memswap', 'load']:
        plugin_display_optional[p] = True
    if stats_number > 1:
        self.space_between_column = max(1, int((self.screen.getmaxyx()[1] - stats_width) / (stats_number - 1)))
        for p in ['mem', 'cpu']:
            # No space ? Remove optional stats
            if self.space_between_column < 3:
                plugin_display_optional[p] = False
                plugin_widths[p] = self.get_stats_display_width(stat_display[p], without_option=True) if hasattr(self.args, 'disable_' + p) else 0
                stats_width = sum(itervalues(plugin_widths)) + 1
                self.space_between_column = max(1, int((self.screen.getmaxyx()[1] - stats_width) / (stats_number - 1)))
    else:
        self.space_between_column = 0

    # Display CPU, MEM, SWAP and LOAD
    for p in ['cpu', 'gpu', 'mem', 'memswap', 'load']:
        if p in stat_display:
            self.display_plugin(stat_display[p], display_optional=plugin_display_optional[p])
        if p != 'load':
            # Skip last column
            self.new_column()

    # Space between column
    self.space_between_column = 3

    # Backup line position
    self.saved_line = self.next_line
def __serve_forever(self):
    """Main client loop."""
    while True:
        # No need to update the server list
        # It's done by the GlancesAutoDiscoverListener class (autodiscover.py)
        # Or defined statically in the configuration file (module static_list.py)
        # For each server in the list, grab elementary stats (CPU, LOAD, MEM, OS...)
        # logger.debug(self.get_servers_list())
        try:
            for v in self.get_servers_list():
                # Do not retrieve stats for static servers
                # Why ? Because for each offline server, the timeout will be reached
                # So ? The curses interface freezes
                if v['type'] == 'STATIC' and v['status'] in ['UNKNOWN', 'SNMP', 'OFFLINE']:
                    continue

                # Get the server URI
                uri = self.__get_uri(v)

                # Try to connect to the server
                t = GlancesClientTransport()
                t.set_timeout(3)

                # Get common stats
                try:
                    s = ServerProxy(uri, transport=t)
                except Exception as e:
                    logger.warning("Client browser couldn't create socket {0}: {1}".format(uri, e))
                else:
                    # Mandatory stats
                    try:
                        # CPU%
                        cpu_percent = 100 - json.loads(s.getCpu())['idle']
                        v['cpu_percent'] = '{0:.1f}'.format(cpu_percent)
                        # MEM%
                        v['mem_percent'] = json.loads(s.getMem())['percent']
                        # OS (Human Readable name)
                        v['hr_name'] = json.loads(s.getSystem())['hr_name']
                    except (socket.error, Fault, KeyError) as e:
                        logger.debug("Error while grabbing stats from {0}: {1}".format(uri, e))
                        v['status'] = 'OFFLINE'
                    except ProtocolError as e:
                        if e.errcode == 401:
                            # Error 401 (Authentication failed)
                            # Password is not the good one...
                            v['password'] = None
                            v['status'] = 'PROTECTED'
                        else:
                            v['status'] = 'OFFLINE'
                        logger.debug("Cannot grab stats from {0} ({1} {2})".format(uri, e.errcode, e.errmsg))
                    else:
                        # Status
                        v['status'] = 'ONLINE'

                        # Optional stats (load is not available on Windows OS)
                        try:
                            # LOAD
                            load_min5 = json.loads(s.getLoad())['min5']
                            v['load_min5'] = '{0:.2f}'.format(load_min5)
                        except Exception as e:
                            logger.warning("Error while grabbing stats from {0}: {1}".format(uri, e))
        # List can change size during iteration...
        except RuntimeError:
            logger.debug("Server list dictionary changed inside the loop (wait for next update)")

        # Update the screen (list or Glances client)
        if not self.screen.active_server:
            # Display the Glances browser
            self.screen.update(self.get_servers_list())
        else:
            # Display the Glances client for the selected server
            logger.debug("Selected server: {0}".format(self.get_servers_list()[self.screen.active_server]))

            # Connection can take time
            # Display a popup
            self.screen.display_popup('Connect to {0}:{1}'.format(v['name'], v['port']), duration=1)

            # A password is needed to access to the server's stats
            if self.get_servers_list()[self.screen.active_server]['password'] is None:
                # First of all, check if a password is available in the [passwords] section
                clear_password = self.password.get_password(v['name'])
                if (clear_password is None or
                        self.get_servers_list()[self.screen.active_server]['status'] == 'PROTECTED'):
                    # Else, the password should be entered by the user
                    # Display a popup to enter password
                    clear_password = self.screen.display_popup(
                        'Password needed for {0}: '.format(v['name']), is_input=True)
                # Store the password for the selected server
                if clear_password is not None:
                    self.set_in_selected('password', self.password.sha256_hash(clear_password))

            # Display the Glances client on the selected server
            logger.info("Connect Glances client to the {0} server".format(
                self.get_servers_list()[self.screen.active_server]['key']))

            # Init the client
            args_server = self.args

            # Overwrite connection settings
            args_server.client = self.get_servers_list()[self.screen.active_server]['ip']
            args_server.port = self.get_servers_list()[self.screen.active_server]['port']
            args_server.username = self.get_servers_list()[self.screen.active_server]['username']
            args_server.password = self.get_servers_list()[self.screen.active_server]['password']
            client = GlancesClient(config=self.config, args=args_server, return_to_browser=True)

            # Test if client and server are in the same major version
            if not client.login():
                self.screen.display_popup(
                    "Sorry, cannot connect to '{0}'\n"
                    "See 'glances.log' for more details".format(v['name']))

                # Set the OFFLINE status for the selected server
                self.set_in_selected('status', 'OFFLINE')
            else:
                # Start the client loop
                # Return connection type: 'glances' or 'snmp'
                connection_type = client.serve_forever()

                try:
                    logger.debug("Disconnect Glances client from the {0} server".format(
                        self.get_servers_list()[self.screen.active_server]['key']))
                except IndexError:
                    # Server does not exist anymore
                    pass
                else:
                    # Set the connection status for the selected server
                    if connection_type == 'snmp':
                        self.set_in_selected('status', 'SNMP')
                    else:
                        self.set_in_selected('status', 'ONLINE')

            # Return to the browser (no server selected)
            self.screen.active_server = None
def display(self, servers_list):
    """Display the servers list.

    Return:
        True if the stats have been displayed
        False if the stats have not been displayed (no server available)
    """
    # Init the internal line/column for Glances Curses
    self.init_line_column()

    # Get the current screen size
    screen_x = self.screen.getmaxyx()[1]
    screen_y = self.screen.getmaxyx()[0]

    # Init position
    x = 0
    y = 0

    # Display top header
    if len(servers_list) == 0:
        if self.first_scan and not self.args.disable_autodiscover:
            msg = 'Glances is scanning your network. Please wait...'
            self.first_scan = False
        else:
            msg = 'No Glances server available'
    elif len(servers_list) == 1:
        msg = 'One Glances server available'
    else:
        msg = '{} Glances servers available'.format(len(servers_list))
    if self.args.disable_autodiscover:
        msg += ' ' + '(auto discover is disabled)'
    self.term_window.addnstr(y, x, msg, screen_x - x, self.colors_list['TITLE'])

    if len(servers_list) == 0:
        return False

    # Display the Glances server list
    # ================================

    # Table of tables
    # Item description: [stats_id, column name, column size]
    column_def = [
        ['name', 'Name', 16],
        ['alias', None, None],
        ['load_min5', 'LOAD', 6],
        ['cpu_percent', 'CPU%', 5],
        ['mem_percent', 'MEM%', 5],
        ['status', 'STATUS', 9],
        ['ip', 'IP', 15],
        # ['port', 'PORT', 5],
        ['hr_name', 'OS', 16],
    ]
    y = 2

    # Display table header
    xc = x + 2
    for cpt, c in enumerate(column_def):
        if xc < screen_x and y < screen_y and c[1] is not None:
            self.term_window.addnstr(y, xc, c[1], screen_x - x, self.colors_list['BOLD'])
            xc += c[2] + self.space_between_column
    y += 1

    # If a server has been deleted from the list...
    # ... and if the cursor is in the latest position
    if self.cursor > len(servers_list) - 1:
        # Set the cursor position to the latest item
        self.cursor = len(servers_list) - 1

    # Display table
    line = 0
    for v in servers_list:
        # Get server stats
        server_stat = {}
        for c in column_def:
            try:
                server_stat[c[0]] = v[c[0]]
            except KeyError as e:
                logger.debug("Cannot grab stats {} from server (KeyError: {})".format(c[0], e))
                server_stat[c[0]] = '?'
            # Display alias instead of name
            try:
                if c[0] == 'alias' and v[c[0]] is not None:
                    server_stat['name'] = v[c[0]]
            except KeyError:
                pass

        # Display line for server stats
        cpt = 0
        xc = x

        # Is the line selected ?
        if line == self.cursor:
            # Display cursor
            self.term_window.addnstr(y, xc, ">", screen_x - xc, self.colors_list['BOLD'])

        # Display the line
        xc += 2
        for c in column_def:
            if xc < screen_x and y < screen_y and c[1] is not None:
                # Display server stats
                self.term_window.addnstr(y, xc, format(server_stat[c[0]]), c[2], self.colors_list[v['status']])
                xc += c[2] + self.space_between_column
            cpt += 1
        # Next line, next server...
        y += 1
        line += 1

    return True
def exit(self):
    """Close the export module."""
    logger.debug("Finalise export interface %s" % self.export_name)
"""Wifi plugin.""" import operator from glances.logger import logger from glances.plugins.glances_plugin import GlancesPlugin import psutil # Use the Wifi Python lib (https://pypi.python.org/pypi/wifi) # Linux-only try: from wifi.scan import Cell from wifi.exceptions import InterfaceError except ImportError: logger.debug("Wifi library not found. Glances cannot grab Wifi info.") wifi_tag = False else: wifi_tag = True class Plugin(GlancesPlugin): """Glances Wifi plugin. Get stats of the current Wifi hotspots. """ def __init__(self, args=None): """Init the plugin.""" super(Plugin, self).__init__(args=args)
def stop(self, timeout=None):
    """Stop the thread."""
    logger.debug("docker plugin - Close thread for container {}".format(self._container.name))
    self._stopper.set()
def get_docker_network(self, container_id, all_stats):
    """Return the container network usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}
    with:
        time_since_update: number of seconds elapsed since the latest grab
        rx: Number of bytes received
        tx: Number of bytes transmitted
    """
    # Init the returned dict
    network_new = {}

    # Read the rx/tx stats (in bytes)
    try:
        netcounters = all_stats["networks"]
    except KeyError as e:
        # all_stats do not have NETWORK information
        logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e))
        logger.debug(all_stats)
        # No fallback available...
        return network_new

    # Previous network interface stats are stored in the netcounters_old variable
    if not hasattr(self, 'netcounters_old'):
        # First call, we init the netcounters_old var
        self.netcounters_old = {}
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.netcounters_old:
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable Rx/s and Tx/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        try:
            network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
            network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
            network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
            network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
            network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
        except KeyError as e:
            # all_stats do not have INTERFACE information
            logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)

        # Save stats to compute next bitrate
        self.netcounters_old[container_id] = netcounters

    # Return the stats
    return network_new
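# Hedged usage sketch: deriving per-second rates from the dict returned by
# get_docker_network() above. The numbers are illustrative only.
network = {'time_since_update': 3.0, 'rx': 3000, 'tx': 600}
rx_per_second = network['rx'] / network['time_since_update']  # -> 1000.0 B/s
tx_per_second = network['tx'] / network['time_since_update']  # -> 200.0 B/s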
def update(self):
    """Update Docker stats using the input method."""
    # Init new stats
    stats = self.get_init_value()

    # The docker-py lib is mandatory
    if import_error_tag:
        return self.stats

    if self.input_method == 'local':
        # Update stats

        # Docker version
        # Example: {
        #     "KernelVersion": "3.16.4-tinycore64",
        #     "Arch": "amd64",
        #     "ApiVersion": "1.15",
        #     "Version": "1.3.0",
        #     "GitCommit": "c78088f",
        #     "Os": "linux",
        #     "GoVersion": "go1.3.3"
        # }
        try:
            stats['version'] = self.docker_client.version()
        except Exception as e:
            # Correct issue #649
            logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e))
            # We may have lost connection: remove the version info
            if 'version' in self.stats:
                del self.stats['version']
            self.stats['containers'] = []
            return self.stats

        # Update current containers list
        try:
            # Issue #1152: Docker module doesn't export details about stopped containers
            # The Docker/all key of the configuration file should be set to True
            containers = self.docker_client.containers.list(all=self._all_tag()) or []
        except Exception as e:
            logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e))
            # We may have lost connection: empty the containers list
            self.stats['containers'] = []
            return self.stats

        # Start a new thread for each new container
        for container in containers:
            if container.id not in self.thread_list:
                # Thread did not exist in the internal dict
                # Create it and add it to the internal dict
                logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]))
                t = ThreadDockerGrabber(container)
                self.thread_list[container.id] = t
                t.start()

        # Stop threads for non-existing containers
        nonexisting_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
        for container_id in nonexisting_containers:
            # Stop the thread
            logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
            self.thread_list[container_id].stop()
            # Delete the item from the dict
            del self.thread_list[container_id]

        # Get stats for all containers
        stats['containers'] = []
        for container in containers:
            # Init the stats for the current container
            container_stats = {}
            # The key is the container name and not the Id
            container_stats['key'] = self.get_key()
            # Export name (first name in the Names list, without the /)
            container_stats['name'] = nativestr(container.name)
            # Export global Names (used by the WebUI)
            container_stats['Names'] = [nativestr(container.name)]
            # Container Id
            container_stats['Id'] = container.id
            # Container Image
            container_stats['Image'] = container.image.tags
            # Global stats (from attrs)
            container_stats['Status'] = container.attrs['State']['Status']
            container_stats['Command'] = container.attrs['Config']['Entrypoint']
            # Standards stats
            if container_stats['Status'] in ('running', 'paused'):
                container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_list[container.id].stats)
                container_stats['cpu_percent'] = container_stats['cpu'].get('total', None)
                container_stats['memory'] = self.get_docker_memory(container.id, self.thread_list[container.id].stats)
                container_stats['memory_usage'] = container_stats['memory'].get('usage', None)
                container_stats['io'] = self.get_docker_io(container.id, self.thread_list[container.id].stats)
                container_stats['io_r'] = container_stats['io'].get('ior', None)
                container_stats['io_w'] = container_stats['io'].get('iow', None)
                container_stats['network'] = self.get_docker_network(container.id, self.thread_list[container.id].stats)
                container_stats['network_rx'] = container_stats['network'].get('rx', None)
                container_stats['network_tx'] = container_stats['network'].get('tx', None)
            else:
                container_stats['cpu'] = {}
                container_stats['cpu_percent'] = None
                container_stats['memory'] = {}
                container_stats['memory_percent'] = None
                container_stats['io'] = {}
                container_stats['io_r'] = None
                container_stats['io_w'] = None
                container_stats['network'] = {}
                container_stats['network_rx'] = None
                container_stats['network_tx'] = None
            # Add current container stats to the stats list
            stats['containers'].append(container_stats)

    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # Not available
        pass

    # Sort and update the stats
    self.stats = sort_stats(stats)

    return self.stats
def __display_server(self, server):
    """Connect to and display the given server."""
    # Display the Glances client for the selected server
    logger.debug("Selected server {}".format(server))

    # Connection can take time
    # Display a popup
    self.screen.display_popup('Connect to {}:{}'.format(server['name'], server['port']), duration=1)

    # A password is needed to access to the server's stats
    if server['password'] is None:
        # First of all, check if a password is available in the [passwords] section
        clear_password = self.password.get_password(server['name'])
        if (clear_password is None or
                self.get_servers_list()[self.screen.active_server]['status'] == 'PROTECTED'):
            # Else, the password should be entered by the user
            # Display a popup to enter password
            clear_password = self.screen.display_popup(
                'Password needed for {}: '.format(server['name']), is_input=True)
        # Store the password for the selected server
        if clear_password is not None:
            self.set_in_selected('password', self.password.sha256_hash(clear_password))

    # Display the Glances client on the selected server
    logger.info("Connect Glances client to the {} server".format(server['key']))

    # Init the client
    args_server = self.args

    # Overwrite connection settings
    args_server.client = server['ip']
    args_server.port = server['port']
    args_server.username = server['username']
    args_server.password = server['password']
    client = GlancesClient(config=self.config, args=args_server, return_to_browser=True)

    # Test if client and server are in the same major version
    if not client.login():
        self.screen.display_popup("Sorry, cannot connect to '{}'\n"
                                  "See '{}' for more details".format(server['name'], LOG_FILENAME))

        # Set the OFFLINE status for the selected server
        self.set_in_selected('status', 'OFFLINE')
    else:
        # Start the client loop
        # Return connection type: 'glances' or 'snmp'
        connection_type = client.serve_forever()

        try:
            logger.debug("Disconnect Glances client from the {} server".format(server['key']))
        except IndexError:
            # Server does not exist anymore
            pass
        else:
            # Set the connection status for the selected server
            if connection_type == 'snmp':
                self.set_in_selected('status', 'SNMP')
            else:
                self.set_in_selected('status', 'ONLINE')

    # Return to the browser (no server selected)
    self.screen.active_server = None
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""RAID plugin."""

from glances.compat import iterkeys
from glances.logger import logger
from glances.plugins.glances_plugin import GlancesPlugin

# pymdstat only available on GNU/Linux OS
try:
    from pymdstat import MdStat
except ImportError:
    logger.debug("pymdstat library not found. Glances cannot grab RAID info.")


class Plugin(GlancesPlugin):
    """Glances RAID plugin.

    stats is a dict (see pymdstat documentation)
    """

    def __init__(self, args=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args)

        # We want to display the stat in the curse interface
        self.display_curse = True

        # Init the stats
def login(self):
    """Logon to the server."""
    ret = True

    if not self.args.snmp_force:
        # First of all, trying to connect to a Glances server
        client_version = None
        try:
            client_version = self.client.init()
        except socket.error as err:
            # Fallback to SNMP
            self.client_mode = 'snmp'
            logger.error("Connection to Glances server failed ({0} {1})".format(err.errno, err.strerror))
            fallbackmsg = 'No Glances server found. Trying fallback to SNMP...'
            if not self.return_to_browser:
                print(fallbackmsg)
            else:
                logger.info(fallbackmsg)
        except ProtocolError as err:
            # Other errors
            msg = "Connection to server failed"
            if err.errcode == 401:
                msg += " (Bad username/password)"
            else:
                msg += " ({0} {1})".format(err.errcode, err.errmsg)
            self.log_and_exit(msg)
            return False

        if self.client_mode == 'glances':
            # Check that both client and server are in the same major version
            if __version__.split('.')[0] == client_version.split('.')[0]:
                # Init stats
                self.stats = GlancesStatsClient(config=self.config, args=self.args)
                self.stats.set_plugins(json.loads(self.client.getAllPlugins()))
                logger.debug("Client version: {0} / Server version: {1}".format(__version__, client_version))
            else:
                self.log_and_exit("Client and server not compatible: "
                                  "Client version: {0} / Server version: {1}".format(__version__, client_version))
                return False
    else:
        self.client_mode = 'snmp'

    # SNMP mode
    if self.client_mode == 'snmp':
        logger.info("Trying to grab stats by SNMP...")

        from glances.stats_client_snmp import GlancesStatsClientSNMP

        # Init stats
        self.stats = GlancesStatsClientSNMP(config=self.config, args=self.args)

        if not self.stats.check_snmp():
            self.log_and_exit("Connection to SNMP server failed")
            return False

    if ret:
        # Load limits from the configuration file
        # Each client can choose its own limits
        self.stats.load_limits(self.config)

        # Init screen
        self.screen = GlancesCursesClient(args=self.args)

    # Return result
    return ret
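# Minimal sketch of the compatibility rule enforced above: only the major
# part of the version strings must match (the version values are examples).
def compatible(client_version, server_version):
    return client_version.split('.')[0] == server_version.split('.')[0]

assert compatible('3.1.0', '3.0.2')
assert not compatible('2.11.1', '3.0.2')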
def stop(self, timeout=None):
    """Stop the thread."""
    logger.debug("ports plugin - Close thread for scan list {}".format(self._stats))
    self._stopper.set()
def parse_args(self):
    """Parse command line arguments."""
    args = self.init_args().parse_args()

    # Load the configuration file, if it exists
    self.config = Config(args.conf_file)

    # Debug mode
    if args.debug:
        from logging import DEBUG
        logger.setLevel(DEBUG)

    # Client/server Port
    if args.port is None:
        if args.webserver:
            args.port = self.web_server_port
        else:
            args.port = self.server_port

    # Autodiscover
    if args.disable_autodiscover:
        logger.info("Auto discover mode is disabled")

    # In web server mode
    if args.webserver:
        args.process_short_name = True

    # Server or client login/password
    if args.username_prompt:
        # Every username needs a password
        args.password_prompt = True
        # Prompt username
        if args.server:
            args.username = self.__get_username(
                description='Define the Glances server username: ')
        elif args.webserver:
            args.username = self.__get_username(
                description='Define the Glances webserver username: ')
        elif args.client:
            args.username = self.__get_username(
                description='Enter the Glances server username: ')
    else:
        # Default username is 'glances'
        args.username = self.username

    if args.password_prompt:
        # Interactive or file password
        if args.server:
            args.password = self.__get_password(
                description='Define the Glances server password ({0} username): '.format(args.username),
                confirm=True,
                username=args.username)
        elif args.webserver:
            args.password = self.__get_password(
                description='Define the Glances webserver password ({0} username): '.format(args.username),
                confirm=True,
                username=args.username)
        elif args.client:
            args.password = self.__get_password(
                description='Enter the Glances server password ({0} username): '.format(args.username),
                clear=True,
                username=args.username)
    else:
        # Default is no password
        args.password = self.password

    # By default help is hidden
    args.help_tag = False

    # Display Rx and Tx, not the sum, for the network
    args.network_sum = False
    args.network_cumul = False

    # Manage full quicklook option
    if args.full_quicklook:
        logger.info("Full quicklook mode")
        args.disable_quicklook = False
        args.disable_cpu = True
        args.disable_mem = True
        args.disable_swap = True
        args.disable_load = False

    # Manage disable_top option
    if args.disable_top:
        logger.info("Disable top menu")
        args.disable_quicklook = True
        args.disable_cpu = True
        args.disable_mem = True
        args.disable_swap = True
        args.disable_load = True

    # Control parameter and exit if it is not OK
    self.args = args

    # Export is only available in standalone or client mode (issue #614)
    export_tag = (args.export_csv or args.export_elasticsearch or
                  args.export_statsd or args.export_influxdb or
                  args.export_opentsdb or args.export_rabbitmq)
    if not (self.is_standalone() or self.is_client()) and export_tag:
        logger.critical("Export is only available in standalone or client mode")
        sys.exit(2)

    # Filter is only available in standalone mode
    if args.process_filter is not None and not self.is_standalone():
        logger.critical("Process filter is only available in standalone mode")
        sys.exit(2)

    # Check graph output path
    if args.enable_history and args.path_history is not None:
        if not os.access(args.path_history, os.W_OK):
            logger.critical("History output path {0} does not exist or is not writable".format(args.path_history))
            sys.exit(2)
        logger.debug("History output path is set to {0}".format(args.path_history))

    # Disable HDDTemp if sensors are disabled
    if args.disable_sensors:
        args.disable_hddtemp = True
        logger.debug("Sensors and HDDTemp are disabled")

    return args
def exit(self):
    """Just log an event when Glances exits."""
    logger.debug("Stop the {} plugin".format(self.plugin_name))
def parse_args(self):
    """Parse command line arguments."""
    args = self.init_args().parse_args()

    # Load the configuration file, if it exists
    self.config = Config(args.conf_file)

    # Debug mode
    if args.debug:
        from logging import DEBUG
        logger.setLevel(DEBUG)
    else:
        from warnings import simplefilter
        simplefilter("ignore")

    # Plugins disable/enable
    if args.disable_plugin is not None:
        for p in args.disable_plugin.split(','):
            disable(args, p)

    # Exporters activation
    if args.export is not None:
        for p in args.export.split(','):
            setattr(args, 'export_' + p, True)

    # Client/server Port
    if args.port is None:
        if args.webserver:
            args.port = self.web_server_port
        else:
            args.port = self.server_port

    # Port in the -c URI #996
    if args.client is not None:
        args.client, args.port = (x if x else y for (x, y) in zip(
            args.client.partition(':')[::2], (args.client, args.port)))

    # Autodiscover
    if args.disable_autodiscover:
        logger.info("Auto discover mode is disabled")

    # By default Windows is started in Web mode
    if WINDOWS:
        args.webserver = True

    # In web server mode, default refresh time: 5 sec
    if args.webserver:
        args.time = 5
        args.process_short_name = True

    # Server or client login/password
    if args.username_prompt:
        # Every username needs a password
        args.password_prompt = True
        # Prompt username
        if args.server:
            args.username = self.__get_username(
                description='Define the Glances server username: ')
        elif args.webserver:
            args.username = self.__get_username(
                description='Define the Glances webserver username: ')
        elif args.client:
            args.username = self.__get_username(
                description='Enter the Glances server username: ')
    else:
        # Default username is 'glances'
        args.username = self.username

    if args.password_prompt:
        # Interactive or file password
        if args.server:
            args.password = self.__get_password(
                description='Define the Glances server password ({} username): '.format(args.username),
                confirm=True,
                username=args.username)
        elif args.webserver:
            args.password = self.__get_password(
                description='Define the Glances webserver password ({} username): '.format(args.username),
                confirm=True,
                username=args.username)
        elif args.client:
            args.password = self.__get_password(
                description='Enter the Glances server password ({} username): '.format(args.username),
                clear=True,
                username=args.username)
    else:
        # Default is no password
        args.password = self.password

    # By default help is hidden
    args.help_tag = False

    # Display Rx and Tx, not the sum, for the network
    args.network_sum = False
    args.network_cumul = False

    # Manage light mode
    if args.enable_light:
        logger.info("Light mode is on")
        args.disable_left_sidebar = True
        disable(args, 'process')
        disable(args, 'alert')
        disable(args, 'amps')
        disable(args, 'docker')

    # Manage full quicklook option
    if args.full_quicklook:
        logger.info("Full quicklook mode")
        enable(args, 'quicklook')
        disable(args, 'cpu')
        disable(args, 'mem')
        disable(args, 'memswap')
        enable(args, 'load')

    # Manage disable_top option
    if args.disable_top:
        logger.info("Disable top menu")
        disable(args, 'quicklook')
        disable(args, 'cpu')
        disable(args, 'mem')
        disable(args, 'memswap')
        disable(args, 'load')

    # Init the generate_graph tag
    # Should be set to True to generate graphs
    args.generate_graph = False

    # Control parameter and exit if it is not OK
    self.args = args

    # Export is only available in standalone or client mode (issue #614)
    export_tag = self.args.export is not None and any(self.args.export)
    if WINDOWS and export_tag:
        # On Windows, export is possible but only in quiet mode
        # See issue #1038
        logger.info("On Windows OS, export disables the Web interface")
        self.args.quiet = True
        self.args.webserver = False
    elif not (self.is_standalone() or self.is_client()) and export_tag:
        logger.critical("Export is only available in standalone or client mode")
        sys.exit(2)

    # Filter is only available in standalone mode
    if args.process_filter is not None and not self.is_standalone():
        logger.critical("Process filter is only available in standalone mode")
        sys.exit(2)

    # Disable HDDTemp if sensors are disabled
    if getattr(args, 'disable_sensors', False):
        disable(args, 'hddtemp')
        logger.debug("Sensors and HDDTemp are disabled")

    return args
import re
import threading
import time

from glances.compat import iterkeys, itervalues
from glances.logger import logger
from glances.timer import getTimeSinceLastUpdate
from glances.plugins.glances_plugin import GlancesPlugin

# docker-py library (optional and Linux-only)
# https://github.com/docker/docker-py
try:
    import docker
    import requests
except ImportError as e:
    logger.debug("Docker library not found (%s). Glances cannot grab Docker info." % e)
    docker_tag = False
else:
    docker_tag = True


class Plugin(GlancesPlugin):
    """Glances Docker plugin.

    stats is a list
    """

    def __init__(self, args=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args)
def exit(self):
    """Close the CSV file."""
    logger.debug("Finalise export interface %s" % self.export_name)
    self.csv_file.close()
def display(self, stats, cs_status=None):
    """Display stats on the screen.

    stats: Stats database to display
    cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to a Glances server
        "SNMP": Client is connected to a SNMP server
        "Disconnected": Client is disconnected from the server

    Return:
        True if the stats have been displayed
        False if the help has been displayed
    """
    # Init the internal line/column for Glances Curses
    self.init_line_column()

    # No processes list in SNMP mode
    if cs_status == 'SNMP':
        # so... more space for other plugins
        plugin_max_width = 43
    else:
        plugin_max_width = None

    # Update the stats messages
    ###########################

    # Update the client server status
    self.args.cs_status = cs_status
    __stat_display = self.__get_stat_display(stats, plugin_max_width)

    # Adapt number of processes to the available space
    max_processes_displayed = (
        self.screen.getmaxyx()[0] - 11 -
        self.get_stats_display_height(__stat_display["alert"]) -
        self.get_stats_display_height(__stat_display["docker"]))
    try:
        if self.args.enable_process_extended and not self.args.process_tree:
            max_processes_displayed -= 4
    except AttributeError:
        pass
    if max_processes_displayed < 0:
        max_processes_displayed = 0
    if (glances_processes.max_processes is None or
            glances_processes.max_processes != max_processes_displayed):
        logger.debug("Set number of displayed processes to {}".format(max_processes_displayed))
        glances_processes.max_processes = max_processes_displayed

    __stat_display["processlist"] = stats.get_plugin('processlist').get_stats_display(args=self.args)

    # Display the stats on the curses interface
    ###########################################

    # Help screen (on top of the other stats)
    if self.args.help_tag:
        # Display the stats...
        self.display_plugin(stats.get_plugin('help').get_stats_display(args=self.args))
        # ... and exit
        return False

    # =====================================
    # Display first line (system+ip+uptime)
    # Optionally: Cloud on second line
    # =====================================
    self.__display_firstline(__stat_display)

    # ==============================================================
    # Display second line (<SUMMARY>+CPU|PERCPU+<GPU>+LOAD+MEM+SWAP)
    # ==============================================================
    self.__display_secondline(__stat_display, stats)

    # ===================================================================
    # Display left sidebar (NETWORK+PORTS+DISKIO+FS+SENSORS+Current time)
    # ===================================================================
    self.__display_left(__stat_display)

    # ====================================
    # Display right stats (process and co)
    # ====================================
    self.__display_right(__stat_display)

    # History option
    # Generate history graph
    if self.graph_tag and self.args.export_graph:
        self.display_popup(
            'Generate graphs history in {}\nPlease wait...'.format(
                self.glances_graph.get_output_folder()))
        self.display_popup(
            'Generate graphs history in {}\nDone: {} graphs generated'.format(
                self.glances_graph.get_output_folder(),
                self.glances_graph.generate_graph(stats)))
    elif self.reset_history_tag and self.args.export_graph:
        self.display_popup('Reset graph history')
        self.glances_graph.reset(stats)
    elif (self.graph_tag or self.reset_history_tag) and not self.args.export_graph:
        try:
            self.glances_graph.graph_enabled()
        except Exception:
            self.display_popup('Graph disabled\nEnable it using --export-graph')
        else:
            self.display_popup('Graph disabled')
    self.graph_tag = False
    self.reset_history_tag = False

    # Display edit filter popup
    # Only in standalone mode (cs_status is None)
    if self.edit_filter and cs_status is None:
        new_filter = self.display_popup(
            'Process filter pattern: \n\n' +
            'Examples:\n' +
            '- python\n' +
            '- .*python.*\n' +
            '- \/usr\/lib.*\n' +
            '- name:.*nautilus.*\n' +
            '- cmdline:.*glances.*\n' +
            '- username:nicolargo\n' +
            '- username:^root ',
            is_input=True,
            input_value=glances_processes.process_filter_input)
        glances_processes.process_filter = new_filter
    elif self.edit_filter and cs_status is not None:
        self.display_popup('Process filter only available in standalone mode')
    self.edit_filter = False

    return True
def get_process_curses_data(self, p, first, args): """Get curses data to display for a process.""" ret = [self.curse_new_line()] # CPU if 'cpu_percent' in p and p[ 'cpu_percent'] is not None and p['cpu_percent'] != '': if args.disable_irix and self.nb_log_core != 0: msg = '{:>6.1f}'.format(p['cpu_percent'] / float(self.nb_log_core)) else: msg = '{:>6.1f}'.format(p['cpu_percent']) ret.append( self.curse_add_line( msg, self.get_alert(p['cpu_percent'], header="cpu"))) else: msg = '{:>6}'.format('?') ret.append(self.curse_add_line(msg)) # MEM if 'memory_percent' in p and p[ 'memory_percent'] is not None and p['memory_percent'] != '': msg = '{:>6.1f}'.format(p['memory_percent']) ret.append( self.curse_add_line( msg, self.get_alert(p['memory_percent'], header="mem"))) else: msg = '{:>6}'.format('?') ret.append(self.curse_add_line(msg)) # VMS/RSS if 'memory_info' in p and p[ 'memory_info'] is not None and p['memory_info'] != '': # VMS msg = '{:>6}'.format( self.auto_unit(p['memory_info'][1], low_precision=False)) ret.append(self.curse_add_line(msg, optional=True)) # RSS msg = '{:>6}'.format( self.auto_unit(p['memory_info'][0], low_precision=False)) ret.append(self.curse_add_line(msg, optional=True)) else: msg = '{:>6}'.format('?') ret.append(self.curse_add_line(msg)) ret.append(self.curse_add_line(msg)) # PID msg = '{:>6}'.format(p['pid']) ret.append(self.curse_add_line(msg)) # USER if 'username' in p: # docker internal users are displayed as ints only, therefore str() # Correct issue #886 on Windows OS msg = ' {:9}'.format(str(p['username'])[:9]) ret.append(self.curse_add_line(msg)) else: msg = ' {:9}'.format('?') ret.append(self.curse_add_line(msg)) # NICE if 'nice' in p: nice = p['nice'] if nice is None: nice = '?' msg = '{:>5}'.format(nice) if isinstance(nice, int) and ((WINDOWS and nice != 32) or (not WINDOWS and nice != 0)): ret.append(self.curse_add_line(msg, decoration='NICE')) else: ret.append(self.curse_add_line(msg)) else: msg = '{:>5}'.format('?') ret.append(self.curse_add_line(msg)) # STATUS if 'status' in p: status = p['status'] msg = '{:>2}'.format(status) if status == 'R': ret.append(self.curse_add_line(msg, decoration='STATUS')) else: ret.append(self.curse_add_line(msg)) else: msg = '{:>2}'.format('?') ret.append(self.curse_add_line(msg)) # TIME+ if self.tag_proc_time: try: delta = timedelta(seconds=sum(p['cpu_times'])) except (OverflowError, TypeError) as e: # Catch OverflowError on some Amazon EC2 server # See https://github.com/nicolargo/glances/issues/87 # Also catch TypeError on Mac OS X # See: https://github.com/nicolargo/glances/issues/622 logger.debug("Cannot get TIME+ ({})".format(e)) self.tag_proc_time = False else: hours, minutes, seconds, microseconds = convert_timedelta( delta) if hours: msg = '{:>4}h'.format(hours) ret.append( self.curse_add_line(msg, decoration='CPU_TIME', optional=True)) msg = '{}:{}'.format(str(minutes).zfill(2), seconds) else: msg = '{:>4}:{}.{}'.format(minutes, seconds, microseconds) else: msg = '{:>10}'.format('?') ret.append(self.curse_add_line(msg, optional=True)) # IO read/write if 'io_counters' in p: # IO read io_rs = int((p['io_counters'][0] - p['io_counters'][2]) / p['time_since_update']) if io_rs == 0: msg = '{:>6}'.format("0") else: msg = '{:>6}'.format(self.auto_unit(io_rs, low_precision=True)) ret.append(self.curse_add_line(msg, optional=True, additional=True)) # IO write io_ws = int((p['io_counters'][1] - p['io_counters'][3]) / p['time_since_update']) if io_ws == 0: msg = '{:>6}'.format("0") else: msg = 
'{:>6}'.format(self.auto_unit(io_ws, low_precision=True)) ret.append(self.curse_add_line(msg, optional=True, additional=True)) else: msg = '{:>6}'.format("?") ret.append(self.curse_add_line(msg, optional=True, additional=True)) ret.append(self.curse_add_line(msg, optional=True, additional=True)) # Command line # If no command line for the process is available, fallback to # the bare process name instead cmdline = p['cmdline'] try: # XXX: remove `cmdline != ['']` when we'll drop support for psutil<4.0.0 if cmdline and cmdline != ['']: path, cmd, arguments = split_cmdline(cmdline) if os.path.isdir(path) and not args.process_short_name: msg = ' {}'.format(path) + os.sep ret.append(self.curse_add_line(msg, splittable=True)) if glances_processes.is_tree_enabled(): # mark position to add tree decoration ret[-1]["_tree_decoration"] = True ret.append( self.curse_add_line(cmd, decoration='PROCESS', splittable=True)) else: msg = ' {}'.format(cmd) ret.append( self.curse_add_line(msg, decoration='PROCESS', splittable=True)) if glances_processes.is_tree_enabled(): # mark position to add tree decoration ret[-1]["_tree_decoration"] = True if arguments: msg = ' {}'.format(arguments) ret.append(self.curse_add_line(msg, splittable=True)) else: msg = ' {}'.format(p['name']) ret.append(self.curse_add_line(msg, splittable=True)) except UnicodeEncodeError: ret.append(self.curse_add_line('', splittable=True)) # Add extended stats but only for the top processes # !!! CPU consumption ??? # TODO: extended stats into the web interface if first and 'extended_stats' in p: # Left padding xpad = ' ' * 13 # First line is CPU affinity if 'cpu_affinity' in p and p['cpu_affinity'] is not None: ret.append(self.curse_new_line()) msg = xpad + 'CPU affinity: ' + str(len( p['cpu_affinity'])) + ' cores' ret.append(self.curse_add_line(msg, splittable=True)) # Second line is memory info if 'memory_info' in p and p['memory_info'] is not None: ret.append(self.curse_new_line()) msg = xpad + 'Memory info: ' for k, v in iteritems(p['memory_info']._asdict()): # Ignore rss and vms (already displayed) if k not in ['rss', 'vms'] and v is not None: msg += k + ' ' + self.auto_unit( v, low_precision=False) + ' ' if 'memory_swap' in p and p['memory_swap'] is not None: msg += 'swap ' + self.auto_unit(p['memory_swap'], low_precision=False) ret.append(self.curse_add_line(msg, splittable=True)) # Third line is for open files/network sessions msg = '' if 'num_threads' in p and p['num_threads'] is not None: msg += 'threads ' + str(p['num_threads']) + ' ' if 'num_fds' in p and p['num_fds'] is not None: msg += 'files ' + str(p['num_fds']) + ' ' if 'num_handles' in p and p['num_handles'] is not None: msg += 'handles ' + str(p['num_handles']) + ' ' if 'tcp' in p and p['tcp'] is not None: msg += 'TCP ' + str(p['tcp']) + ' ' if 'udp' in p and p['udp'] is not None: msg += 'UDP ' + str(p['udp']) + ' ' if msg != '': ret.append(self.curse_new_line()) msg = xpad + 'Open: ' + msg ret.append(self.curse_add_line(msg, splittable=True)) # Fourth line is IO nice level (only Linux and Windows OS) if 'ionice' in p and p['ionice'] is not None: ret.append(self.curse_new_line()) msg = xpad + 'IO nice: ' k = 'Class is ' v = p['ionice'].ioclass # Linux: The scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. # Windows: On Windows only ioclass is used and it can be set to 2 (normal), 1 (low) or 0 (very low).
if WINDOWS: if v == 0: msg += k + 'Very Low' elif v == 1: msg += k + 'Low' elif v == 2: msg += 'No specific I/O priority' else: msg += k + str(v) else: if v == 0: msg += 'No specific I/O priority' elif v == 1: msg += k + 'Real Time' elif v == 2: msg += k + 'Best Effort' elif v == 3: msg += k + 'IDLE' else: msg += k + str(v) # value is a number which goes from 0 to 7. # The higher the value, the lower the I/O priority of the process. if hasattr(p['ionice'], 'value') and p['ionice'].value != 0: msg += ' (value %s/7)' % str(p['ionice'].value) ret.append(self.curse_add_line(msg, splittable=True)) return ret
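# The ioclass-to-label mapping above can be exercised directly with psutil,
# whose Process.ionice() returns an (ioclass, value) namedtuple on Linux.
# A standalone sketch (the helper name and mapping table are ours, not
# Glances'):
import psutil

IONICE_LABELS_LINUX = {0: 'No specific I/O priority',
                       1: 'Class is Real Time',
                       2: 'Class is Best Effort',
                       3: 'Class is IDLE'}

def describe_ionice(pid):
    """Return a human-readable I/O priority string for a PID (Linux only)."""
    ionice = psutil.Process(pid).ionice()
    label = IONICE_LABELS_LINUX.get(int(ionice.ioclass),
                                    'Class is {}'.format(ionice.ioclass))
    if ionice.value:
        # value goes from 0 to 7; the higher the value, the lower the priority
        label += ' (value {}/7)'.format(ionice.value)
    return label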
def get_docker_io(self, container_id, all_stats): """Return the container IO usage using the Docker API (v1.0 or higher). Input: id is the full container id Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}. with: time_since_update: number of seconds elapsed since the last grab ior: Number of bytes read iow: Number of bytes written """ # Init the returned dict io_new = {} # Read the ior/iow stats (in bytes) try: iocounters = all_stats["blkio_stats"] except KeyError as e: # all_stats does not contain IO information logger.debug("Can not grab block IO usage for container {0} ({1})".format(container_id, e)) logger.debug(all_stats) # No fallback available... return io_new # Previous io interface stats are stored in the io_old variable if not hasattr(self, 'iocounters_old'): # First call, we init the io_old var self.iocounters_old = {} try: self.iocounters_old[container_id] = iocounters except (IOError, UnboundLocalError): pass if container_id not in self.iocounters_old: try: self.iocounters_old[container_id] = iocounters except (IOError, UnboundLocalError): pass else: # By storing time data we enable IoR/s and IoW/s calculations in the # XML/RPC API, which would otherwise be overly difficult work # for users of the API try: # Read IOR and IOW value in the structure list of dict ior = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value'] iow = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value'] ior_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value'] iow_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value'] except (IndexError, KeyError) as e: # all_stats does not contain IO information logger.debug("Cannot grab block IO usage for container {} ({})".format(container_id, e)) else: io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id)) io_new['ior'] = ior - ior_old io_new['iow'] = iow - iow_old io_new['cumulative_ior'] = ior io_new['cumulative_iow'] = iow # Save stats to compute next bitrate self.iocounters_old[container_id] = iocounters # Return the stats return io_new
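# The ior/iow figures above follow a generic pattern: keep the previous
# cumulative counter, subtract, and divide by the elapsed time to get a
# per-second rate. A self-contained sketch of that arithmetic (names are
# illustrative, not the plugin's):
def compute_rate(cumulative_new, cumulative_old, time_since_update):
    """Return a per-second rate from two cumulative counters."""
    if time_since_update <= 0:
        return 0
    return (cumulative_new - cumulative_old) / time_since_update

# Example: 2 MB read between two grabs 3 seconds apart
# compute_rate(12 * 1024 ** 2, 10 * 1024 ** 2, 3) -> ~699050 bytes/s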
def update(self): """Update Docker stats using the input method.""" # Reset stats self.reset() # Get the current Docker API client if not self.docker_client: # First time, try to connect to the server self.docker_client = self.connect() if self.docker_client is None: global docker_tag docker_tag = False # The Docker-py lib is mandatory if not docker_tag or (self.args is not None and self.args.disable_docker): return self.stats if self.input_method == 'local': # Update stats # Docker version # Example: { # "KernelVersion": "3.16.4-tinycore64", # "Arch": "amd64", # "ApiVersion": "1.15", # "Version": "1.3.0", # "GitCommit": "c78088f", # "Os": "linux", # "GoVersion": "go1.3.3" # } try: self.stats['version'] = self.docker_client.version() except Exception as e: # Correct issue#649 logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e)) return self.stats # Container globals information # Example: [{u'Status': u'Up 36 seconds', # u'Created': 1420378904, # u'Image': u'nginx:1', # u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443}, # {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}], # u'Command': u"nginx -g 'daemon off;'", # u'Names': [u'/webstack_nginx_1'], # u'Id': u'b0da859e84eb4019cf1d965b15e9323006e510352c402d2f442ea632d61faaa5'}] # Update current containers list try: self.stats['containers'] = self.docker_client.containers() or [] except Exception as e: logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e)) return self.stats # Start a new thread for each new container for container in self.stats['containers']: if container['Id'] not in self.thread_list: # Thread did not exist in the internal dict # Create it and add it to the internal dict logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container['Id'][:12])) t = ThreadDockerGrabber(self.docker_client, container['Id']) self.thread_list[container['Id']] = t t.start() # Stop threads for non-existing containers nonexisting_containers = set(iterkeys(self.thread_list)) - set([c['Id'] for c in self.stats['containers']]) for container_id in nonexisting_containers: # Stop the thread logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12])) self.thread_list[container_id].stop() # Delete the item from the dict del self.thread_list[container_id] # Get stats for all containers for container in self.stats['containers']: # The key is the container name and not the Id container['key'] = self.get_key() # Export name (first name in the list, without the /) container['name'] = container['Names'][0][1:] container['cpu'] = self.get_docker_cpu(container['Id'], self.thread_list[container['Id']].stats) container['memory'] = self.get_docker_memory(container['Id'], self.thread_list[container['Id']].stats) container['network'] = self.get_docker_network(container['Id'], self.thread_list[container['Id']].stats) container['io'] = self.get_docker_io(container['Id'], self.thread_list[container['Id']].stats) elif self.input_method == 'snmp': # Update stats using SNMP # Not available pass return self.stats
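# ThreadDockerGrabber is instantiated above but not shown in this excerpt.
# A minimal sketch of the one-thread-per-container pattern it implements
# (the class name is taken from the call site; the internals are our
# assumption, not the shipped implementation):
import threading
import time

class ThreadStatsGrabberSketch(threading.Thread):
    """Poll a callable in the background and expose its latest result."""

    def __init__(self, grab, interval=1):
        super(ThreadStatsGrabberSketch, self).__init__()
        self.daemon = True
        self._grab = grab              # e.g. a bound Docker stats call
        self._interval = interval
        self._stopper = threading.Event()
        self.stats = {}                # read by the main thread

    def run(self):
        while not self._stopper.is_set():
            self.stats = self._grab()
            time.sleep(self._interval)

    def stop(self):
        self._stopper.set()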
def update(self): """Update network stats using the input method. Stats is a list of dict (one dict per interface) """ # Init new stats stats = self.get_init_value() if self.input_method == 'local': # Update stats using the standard system lib # Grab network interface stat using the psutil net_io_counter method try: netiocounters = psutil.net_io_counters(pernic=True) except UnicodeDecodeError as e: logger.debug('Can not get network interface counters ({})'.format(e)) return self.stats # Grab interface's status (issue #765) # Grab interface's speed (issue #718) netstatus = {} try: netstatus = psutil.net_if_stats() except OSError as e: # see psutil #797/glances #1106 logger.debug('Can not get network interface status ({})'.format(e)) # Previous network interface stats are stored in the network_old variable if not hasattr(self, 'network_old'): # First call, we init the network_old var try: self.network_old = netiocounters except (IOError, UnboundLocalError): pass return self.stats # By storing time data we enable Rx/s and Tx/s calculations in the # XML/RPC API, which would otherwise be overly difficult work # for users of the API time_since_update = getTimeSinceLastUpdate('net') # Loop over interfaces network_new = netiocounters for net in network_new: # Do not take hidden interface into account # or KeyError: 'eth0' when interface is not connected #1348 if self.is_hide(net) or net not in netstatus: continue try: cumulative_rx = network_new[net].bytes_recv cumulative_tx = network_new[net].bytes_sent cumulative_cx = cumulative_rx + cumulative_tx rx = cumulative_rx - self.network_old[net].bytes_recv tx = cumulative_tx - self.network_old[net].bytes_sent cx = rx + tx netstat = {'interface_name': n(net), 'time_since_update': time_since_update, 'cumulative_rx': cumulative_rx, 'rx': rx, 'cumulative_tx': cumulative_tx, 'tx': tx, 'cumulative_cx': cumulative_cx, 'cx': cx, # Interface status 'is_up': netstatus[net].isup, # Interface speed in Mbps, convert it to bps # Can be always 0 on some OSes 'speed': netstatus[net].speed * 1048576, # Set the key for the dict 'key': self.get_key() } except KeyError: continue else: # Append the interface stats to the list stats.append(netstat) # Save stats to compute next bitrate self.network_old = network_new elif self.input_method == 'snmp': # Update stats using SNMP # SNMP bulk command to get all network interface in one shot try: netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name], bulk=True) except KeyError: netiocounters = self.get_stats_snmp(snmp_oid=snmp_oid['default'], bulk=True) # Previous network interface stats are stored in the network_old variable if not hasattr(self, 'network_old'): # First call, we init the network_old var try: self.network_old = netiocounters except (IOError, UnboundLocalError): pass else: # See description in the 'local' block time_since_update = getTimeSinceLastUpdate('net') # Loop over interfaces network_new = netiocounters for net in network_new: # Do not take hidden interface into account if self.is_hide(net): continue try: # Windows: a trick is needed to convert HEX to TXT # http://blogs.technet.com/b/networking/archive/2009/12/18/how-to-query-the-list-of-network-interfaces-using-snmp-via-the-ifdescr-counter.aspx if self.short_system_name == 'windows': try: interface_name = str(base64.b16decode(net[2:-2].upper())) except TypeError: interface_name = net else: interface_name = net cumulative_rx = float(network_new[net]['cumulative_rx']) cumulative_tx = float(network_new[net]['cumulative_tx']) cumulative_cx
= cumulative_rx + cumulative_tx rx = cumulative_rx - float(self.network_old[net]['cumulative_rx']) tx = cumulative_tx - float(self.network_old[net]['cumulative_tx']) cx = rx + tx netstat = { 'interface_name': interface_name, 'time_since_update': time_since_update, 'cumulative_rx': cumulative_rx, 'rx': rx, 'cumulative_tx': cumulative_tx, 'tx': tx, 'cumulative_cx': cumulative_cx, 'cx': cx} except KeyError: continue else: netstat['key'] = self.get_key() stats.append(netstat) # Save stats to compute next bitrate self.network_old = network_new # Update the stats self.stats = stats return self.stats
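# A condensed, standalone version of the bitrate computation done in the
# 'local' branch above, using the same psutil call; the 1-second sampling
# window is illustrative:
import time

import psutil

old = psutil.net_io_counters(pernic=True)
time.sleep(1)
new = psutil.net_io_counters(pernic=True)
for nic in new:
    if nic in old:
        rx = new[nic].bytes_recv - old[nic].bytes_recv  # bytes over 1 second
        tx = new[nic].bytes_sent - old[nic].bytes_sent
        print('{}: rx {} B/s, tx {} B/s'.format(nic, rx, tx))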
def parse_args(self): """Parse command line arguments.""" args = self.init_args().parse_args() # Load the configuration file, if it exists self.config = Config(args.conf_file) # Debug mode if args.debug: from logging import DEBUG logger.setLevel(DEBUG) # Client/server Port if args.port is None: if args.webserver: args.port = self.web_server_port else: args.port = self.server_port # Port in the -c URI #996 if args.client is not None: args.client, args.port = (x if x else y for (x, y) in zip(args.client.partition(':')[::2], (args.client, args.port))) # Autodiscover if args.disable_autodiscover: logger.info("Auto discover mode is disabled") # By default Windows is started in Web mode if WINDOWS: args.webserver = True # In web server mode if args.webserver: args.process_short_name = True # Server or client login/password if args.username_prompt: # Every username needs a password args.password_prompt = True # Prompt username if args.server: args.username = self.__get_username( description='Define the Glances server username: ') elif args.webserver: args.username = self.__get_username( description='Define the Glances webserver username: ') elif args.client: args.username = self.__get_username( description='Enter the Glances server username: ') else: # Default username is 'glances' args.username = self.username if args.password_prompt: # Interactive or file password if args.server: args.password = self.__get_password( description='Define the Glances server password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.webserver: args.password = self.__get_password( description='Define the Glances webserver password ({} username): '.format( args.username), confirm=True, username=args.username) elif args.client: args.password = self.__get_password( description='Enter the Glances server password ({} username): '.format( args.username), clear=True, username=args.username) else: # Default is no password args.password = self.password # By default help is hidden args.help_tag = False # Display Rx and Tx, not the sum for the network args.network_sum = False args.network_cumul = False # Manage full quicklook option if args.full_quicklook: logger.info("Enable full QuickLook mode") args.disable_quicklook = False args.disable_cpu = True args.disable_mem = True args.disable_memswap = True args.disable_load = False # Manage disable_top option if args.disable_top: logger.info("Disable top menu") args.disable_quicklook = True args.disable_cpu = True args.disable_mem = True args.disable_memswap = True args.disable_load = True # Control parameter and exit if it is not OK self.args = args # Export is only available in standalone or client mode (issue #614) export_tag = ( args.export_csv or args.export_cassandra or args.export_couchdb or args.export_elasticsearch or args.export_influxdb or args.export_kafka or args.export_opentsdb or args.export_prometheus or args.export_rabbitmq or args.export_riemann or args.export_statsd or args.export_zeromq ) if WINDOWS and export_tag: # On Windows, export is possible but only in quiet mode # See issue #1038 logger.info("On Windows OS, export disables the Web interface") self.args.quiet = True self.args.webserver = False elif not (self.is_standalone() or self.is_client()) and export_tag: logger.critical("Export is only available in standalone or client mode") sys.exit(2) # Filter is only available in standalone mode if args.process_filter is not None and not self.is_standalone(): logger.critical( "Process filter is only available in standalone mode") sys.exit(2) # Check graph output path if args.export_graph and args.path_graph is not None: if not os.access(args.path_graph, os.W_OK):
logger.critical("Graphs output path {} doesn't exist or is not writable".format(args.path_graph)) sys.exit(2) logger.debug( "Graphs output path is set to {}".format(args.path_graph)) # For export graph, history is mandatory if args.export_graph and args.disable_history: logger.critical("Can not export graph if history is disabled") sys.exit(2) # Disable HDDTemp if sensors are disabled if args.disable_sensors: args.disable_hddtemp = True logger.debug("Sensors and HDDTemp are disabled") return args
def update(self): """Update the processes stats.""" # Reset the stats self.processlist = [] self.reset_processcount() # Do not process if disable tag is set if self.disable_tag: return # Time since last update (for disk_io rate computation) time_since_update = getTimeSinceLastUpdate('process_disk') # Grab standard stats ##################### standard_attrs = ['cmdline', 'cpu_percent', 'cpu_times', 'memory_info', 'memory_percent', 'name', 'nice', 'pid', 'ppid', 'status', 'username', 'num_threads'] # io_counters availability: Linux, BSD, Windows, AIX if not MACOS and not SUNOS and not WSL: standard_attrs += ['io_counters'] # gids availability: Unix if not WINDOWS: standard_attrs += ['gids'] # and build the processes stats list (psutil>=5.3.0) self.processlist = [p.info for p in psutil.process_iter(attrs=standard_attrs, ad_value=None) # OS-related processes filter if not (BSD and p.info['name'] == 'idle') and not (WINDOWS and p.info['name'] == 'System Idle Process') and not (MACOS and p.info['name'] == 'kernel_task') and # Kernel threads filter not (self.no_kernel_threads and LINUX and p.info['gids'].real == 0) and # User filter not (self._filter.is_filtered(p.info))] # Sort the processes list by the current sort_key self.processlist = sort_stats(self.processlist, sortedby=self.sort_key, reverse=True) # Update the processcount self.update_processcount(self.processlist) # Loop over processes and add metadata first = True for proc in self.processlist: # Get extended stats, only for top processes (see issue #403). if first and not self.disable_extended_tag: # - cpu_affinity (Linux, Windows, FreeBSD) # - ionice (Linux and Windows > Vista) # - num_ctx_switches (not available on Illumos/Solaris) # - num_fds (Unix-like) # - num_handles (Windows) # - memory_maps (only swap, Linux) # https://www.cyberciti.biz/faq/linux-which-process-is-using-swap/ # - connections (TCP and UDP) extended = {} try: top_process = psutil.Process(proc['pid']) extended_stats = ['cpu_affinity', 'ionice', 'num_ctx_switches'] if LINUX: # num_fds only available on Unix systems (see issue #1351) extended_stats += ['num_fds'] if WINDOWS: extended_stats += ['num_handles'] # Get the extended stats extended = top_process.as_dict(attrs=extended_stats, ad_value=None) if LINUX: try: extended['memory_swap'] = sum([v.swap for v in top_process.memory_maps()]) except psutil.NoSuchProcess: pass except (psutil.AccessDenied, NotImplementedError): # NotImplementedError: /proc/${PID}/smaps file doesn't exist # on kernel < 2.6.14 or CONFIG_MMU kernel configuration option # is not enabled (see psutil #533/glances #413).
extended['memory_swap'] = None try: extended['tcp'] = len(top_process.connections(kind="tcp")) extended['udp'] = len(top_process.connections(kind="udp")) except (psutil.AccessDenied, psutil.NoSuchProcess): # Manage issue1283 (psutil.AccessDenied) extended['tcp'] = None extended['udp'] = None except (psutil.NoSuchProcess, ValueError, AttributeError) as e: logger.error('Can not grab extended stats ({})'.format(e)) extended['extended_stats'] = False else: logger.debug('Grab extended stats for process {}'.format(proc['pid'])) extended['extended_stats'] = True proc.update(extended) first = False # /End of extended stats # Time since last update (for disk_io rate computation) proc['time_since_update'] = time_since_update # Process status (only keep the first char) proc['status'] = str(proc['status'])[:1].upper() # Process IO # procstat['io_counters'] is a list: # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag] # If io_tag = 0 > Access denied or first time (display "?") # If io_tag = 1 > No access denied (display the IO rate) if 'io_counters' in proc and proc['io_counters'] is not None: io_new = [proc['io_counters'].read_bytes, proc['io_counters'].write_bytes] # For IO rate computation # Append saved IO r/w bytes try: proc['io_counters'] = io_new + self.io_old[proc['pid']] io_tag = 1 except KeyError: proc['io_counters'] = io_new + [0, 0] io_tag = 0 # then save the IO r/w bytes self.io_old[proc['pid']] = io_new else: proc['io_counters'] = [0, 0] + [0, 0] io_tag = 0 # Append the IO tag (for display) proc['io_counters'] += [io_tag] # Compute the maximum value for keys in self._max_values_list: CPU, MEM # Useful to highlight the processes with maximum values for k in self._max_values_list: values_list = [i[k] for i in self.processlist if i[k] is not None] if values_list != []: self.set_max_values(k, max(values_list))
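# The attrs/ad_value form of process_iter() used above grabs every field in
# a single pass and replaces access-denied values with ad_value. A minimal
# standalone example (the attribute list and the 50% threshold are
# illustrative):
import psutil

for proc in psutil.process_iter(attrs=['pid', 'name', 'cpu_percent'],
                                ad_value=None):
    # proc.info is a plain dict limited to the requested attrs
    if proc.info['cpu_percent'] is not None and proc.info['cpu_percent'] > 50.0:
        print('{pid} {name} {cpu_percent}%'.format(**proc.info))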
def display(self, stats, cs_status=None): """Display stats on the screen. stats: Stats database to display cs_status: "None": standalone or server mode "Connected": Client is connected to a Glances server "SNMP": Client is connected to a SNMP server "Disconnected": Client is disconnected from the server Return: True if the stats have been displayed False if the help has been displayed """ # Init the internal line/column for Glances Curses self.init_line_column() # Get the screen size screen_x = self.screen.getmaxyx()[1] screen_y = self.screen.getmaxyx()[0] # No processes list in SNMP mode if cs_status == 'SNMP': # so... more space for other plugins plugin_max_width = 43 else: plugin_max_width = None # Update the stats messages ########################### # Update the client server status self.args.cs_status = cs_status stats_system = stats.get_plugin('system').get_stats_display( args=self.args) stats_uptime = stats.get_plugin('uptime').get_stats_display() if self.args.percpu: stats_cpu = stats.get_plugin('percpu').get_stats_display( args=self.args) else: stats_cpu = stats.get_plugin('cpu').get_stats_display( args=self.args) stats_load = stats.get_plugin('load').get_stats_display(args=self.args) stats_mem = stats.get_plugin('mem').get_stats_display(args=self.args) stats_memswap = stats.get_plugin('memswap').get_stats_display( args=self.args) stats_network = stats.get_plugin('network').get_stats_display( args=self.args, max_width=plugin_max_width) try: stats_ip = stats.get_plugin('ip').get_stats_display(args=self.args) except AttributeError: stats_ip = None stats_diskio = stats.get_plugin('diskio').get_stats_display( args=self.args) stats_fs = stats.get_plugin('fs').get_stats_display( args=self.args, max_width=plugin_max_width) stats_folders = stats.get_plugin('folders').get_stats_display( args=self.args, max_width=plugin_max_width) stats_raid = stats.get_plugin('raid').get_stats_display(args=self.args) stats_sensors = stats.get_plugin('sensors').get_stats_display( args=self.args) stats_now = stats.get_plugin('now').get_stats_display() stats_docker = stats.get_plugin('docker').get_stats_display( args=self.args) stats_processcount = stats.get_plugin( 'processcount').get_stats_display(args=self.args) stats_monitor = stats.get_plugin('monitor').get_stats_display( args=self.args) stats_alert = stats.get_plugin('alert').get_stats_display( args=self.args) # Adapt number of processes to the available space max_processes_displayed = screen_y - 11 - \ self.get_stats_display_height(stats_alert) - \ self.get_stats_display_height(stats_docker) try: if self.args.enable_process_extended and not self.args.process_tree: max_processes_displayed -= 4 except AttributeError: pass if max_processes_displayed < 0: max_processes_displayed = 0 if (glances_processes.max_processes is None or glances_processes.max_processes != max_processes_displayed): logger.debug("Set number of displayed processes to {0}".format( max_processes_displayed)) glances_processes.max_processes = max_processes_displayed stats_processlist = stats.get_plugin('processlist').get_stats_display( args=self.args) # Display the stats on the curses interface ########################################### # Help screen (on top of the other stats) if self.args.help_tag: # Display the stats... self.display_plugin( stats.get_plugin('help').get_stats_display(args=self.args)) # ...
and exit return False # ================================== # Display first line (system+uptime) # ================================== # Space between column self.space_between_column = 0 self.new_line() l_uptime = self.get_stats_display_width( stats_system ) + self.space_between_column + self.get_stats_display_width( stats_ip) + 3 + self.get_stats_display_width(stats_uptime) self.display_plugin(stats_system, display_optional=(screen_x >= l_uptime)) self.new_column() self.display_plugin(stats_ip) # Space between column self.space_between_column = 3 self.new_column() self.display_plugin(stats_uptime) # ======================================================== # Display second line (<SUMMARY>+CPU|PERCPU+LOAD+MEM+SWAP) # ======================================================== self.init_column() self.new_line() # Init quicklook stats_quicklook = {'msgdict': []} quicklook_width = 0 # Get stats for CPU, MEM, SWAP and LOAD (if needed) if self.args.disable_cpu: cpu_width = 0 else: cpu_width = self.get_stats_display_width(stats_cpu) if self.args.disable_mem: mem_width = 0 else: mem_width = self.get_stats_display_width(stats_mem) if self.args.disable_swap: swap_width = 0 else: swap_width = self.get_stats_display_width(stats_memswap) if self.args.disable_load: load_width = 0 else: load_width = self.get_stats_display_width(stats_load) # Size of plugins but quicklook stats_width = cpu_width + mem_width + swap_width + load_width # Number of plugins but quicklook stats_number = ( int(not self.args.disable_cpu and stats_cpu['msgdict'] != []) + int(not self.args.disable_mem and stats_mem['msgdict'] != []) + int(not self.args.disable_swap and stats_memswap['msgdict'] != []) + int(not self.args.disable_load and stats_load['msgdict'] != [])) if not self.args.disable_quicklook: # Quick look is in the place! if self.args.full_quicklook: quicklook_width = screen_x - ( stats_width + 8 + stats_number * self.space_between_column) else: quicklook_width = min( screen_x - (stats_width + 8 + stats_number * self.space_between_column), 79) try: stats_quicklook = stats.get_plugin( 'quicklook').get_stats_display(max_width=quicklook_width, args=self.args) except AttributeError as e: logger.debug("Quicklook plugin not available (%s)" % e) else: quicklook_width = self.get_stats_display_width(stats_quicklook) stats_width += quicklook_width + 1 self.space_between_column = 1 self.display_plugin(stats_quicklook) self.new_column() # Compute spaces between plugins # Note: Only one space between Quicklook and others display_optional_cpu = True display_optional_mem = True if stats_number > 1: self.space_between_column = max( 1, int((screen_x - stats_width) / (stats_number - 1))) # No space? Remove optional MEM stats if self.space_between_column < 3: display_optional_mem = False if self.args.disable_mem: mem_width = 0 else: mem_width = self.get_stats_display_width( stats_mem, without_option=True) stats_width = quicklook_width + 1 + cpu_width + mem_width + swap_width + load_width self.space_between_column = max( 1, int((screen_x - stats_width) / (stats_number - 1))) # No space again?
Remove optional CPU stats if self.space_between_column < 3: display_optional_cpu = False if self.args.disable_cpu: cpu_width = 0 else: cpu_width = self.get_stats_display_width( stats_cpu, without_option=True) stats_width = quicklook_width + 1 + cpu_width + mem_width + swap_width + load_width self.space_between_column = max( 1, int((screen_x - stats_width) / (stats_number - 1))) else: self.space_between_column = 0 # Display CPU, MEM, SWAP and LOAD self.display_plugin(stats_cpu, display_optional=display_optional_cpu) self.new_column() self.display_plugin(stats_mem, display_optional=display_optional_mem) self.new_column() self.display_plugin(stats_memswap) self.new_column() self.display_plugin(stats_load) # Space between column self.space_between_column = 3 # Backup line position self.saved_line = self.next_line # ================================================================== # Display left sidebar (NETWORK+DISKIO+FS+SENSORS+Current time) # ================================================================== self.init_column() if not (self.args.disable_network and self.args.disable_diskio and self.args.disable_fs and self.args.disable_folder and self.args.disable_raid and self.args.disable_sensors ) and not self.args.disable_left_sidebar: self.new_line() self.display_plugin(stats_network) self.new_line() self.display_plugin(stats_diskio) self.new_line() self.display_plugin(stats_fs) self.new_line() self.display_plugin(stats_folders) self.new_line() self.display_plugin(stats_raid) self.new_line() self.display_plugin(stats_sensors) self.new_line() self.display_plugin(stats_now) # ==================================== # Display right stats (process and co) # ==================================== # If space available... if screen_x > 52: # Restore line position self.next_line = self.saved_line # Display right sidebar # ((DOCKER)+PROCESS_COUNT+(MONITORED)+PROCESS_LIST+ALERT) self.new_column() self.new_line() self.display_plugin(stats_docker) self.new_line() self.display_plugin(stats_processcount) if glances_processes.process_filter is None and cs_status is None: # Do not display stats monitor list if a filter exists self.new_line() self.display_plugin(stats_monitor) self.new_line() self.display_plugin( stats_processlist, display_optional=(screen_x > 102), display_additional=(not OSX), max_y=(screen_y - self.get_stats_display_height(stats_alert) - 2)) self.new_line() self.display_plugin(stats_alert) # History option # Generate history graph if self.history_tag and self.args.enable_history: self.display_popup( 'Generate graphs history in {0}\nPlease wait...'.format( self.glances_history.get_output_folder())) self.display_popup( 'Generate graphs history in {0}\nDone: {1} graphs generated'.
format(self.glances_history.get_output_folder(), self.glances_history.generate_graph(stats))) elif self.reset_history_tag and self.args.enable_history: self.display_popup('Reset history') self.glances_history.reset(stats) elif (self.history_tag or self.reset_history_tag) and not self.args.enable_history: try: self.glances_history.graph_enabled() except Exception: self.display_popup( 'History disabled\nEnable it using --enable-history') else: self.display_popup( 'History disabled\nPlease install matplotlib') self.history_tag = False self.reset_history_tag = False # Display edit filter popup # Only in standalone mode (cs_status is None) if self.edit_filter and cs_status is None: new_filter = self.display_popup( 'Process filter pattern: ', is_input=True, input_value=glances_processes.process_filter) glances_processes.process_filter = new_filter elif self.edit_filter and cs_status is not None: self.display_popup( 'Process filter only available in standalone mode') self.edit_filter = False return True
def generate_graph(self, stats): """Generate graphs from plugins history. Return the number of output files generated by the function. """ if not self.graph_enabled(): return 0 index_all = 0 for p in stats.getAllPlugins(): # History h = stats.get_plugin(p).get_export_history() # Current plugin item history list ih = stats.get_plugin(p).get_items_history_list() # Check if we must process history if h is None or ih is None: # History (h) not available for plugin (p) continue # Init graph plt.clf() index_graph = 0 handles = [] labels = [] for i in ih: if i['name'] in iterkeys(h): # The key exists # Add the curves in the current chart logger.debug("Generate graph: %s %s" % (p, i['name'])) index_graph += 1 # Labels handles.append( plt.Rectangle((0, 0), 1, 1, fc=self.get_graph_color(i), ec=self.get_graph_color(i), linewidth=2)) labels.append(self.get_graph_legend(i)) # Legend plt.ylabel(self.get_graph_yunit(i, pre_label='')) # Curves plt.grid(True) # Points are stored as tuple (date, value) x, y = zip(*h[i['name']]) plt.plot_date(x, y, fmt='', drawstyle='default', linestyle='-', color=self.get_graph_color(i), xdate=True, ydate=False) if index_graph == 1: # Title only on top of the first graph plt.title(p.capitalize()) else: # The key did not exist # Find if another key ends with the key # Ex: key='tx' => 'ethernet_tx' # Add one curve per chart stats_history_filtered = sorted([ key for key in iterkeys(h) if key.endswith('_' + i['name']) ]) logger.debug("Generate graphs: %s %s" % (p, stats_history_filtered)) if len(stats_history_filtered) > 0: # Create 'n' graphs # Each graph iterates through the stats plt.clf() index_item = 0 for k in stats_history_filtered: index_item += 1 plt.subplot(len(stats_history_filtered), 1, index_item) # Legend plt.ylabel(self.get_graph_yunit(i, pre_label=k)) # Curves plt.grid(True) # Points are stored as tuple (date, value) x, y = zip(*h[k]) plt.plot_date(x, y, fmt='', drawstyle='default', linestyle='-', color=self.get_graph_color(i), xdate=True, ydate=False) if index_item == 1: # Title only on top of the first graph plt.title(p.capitalize() + ' ' + i['name']) # Save the graph to output file fig = plt.gcf() fig.set_size_inches(20, 5 * index_item) plt.xlabel('Date') plt.savefig(os.path.join( self.output_folder, 'glances_%s_%s.png' % (p, i['name'])), dpi=72) index_all += 1 if index_graph > 0: # Save the graph to output file fig = plt.gcf() fig.set_size_inches(20, 10) plt.legend(handles, labels, loc=1, prop={'size': 9}) plt.xlabel('Date') plt.savefig(os.path.join(self.output_folder, 'glances_%s.png' % p), dpi=72) index_all += 1 plt.close() return index_all
def display_popup(self, message, size_x=None, size_y=None, duration=3, is_input=False, input_size=30, input_value=None): """ Display a centered popup. If is_input is False: Display a centered popup with the given message for duration seconds If size_x and size_y: set the popup size else set it automatically Return True if the popup could be displayed If is_input is True: Display a centered popup with the given message and an input field If size_x and size_y: set the popup size else set it automatically Return the input string or None if the field is empty """ # Center the popup sentence_list = message.split('\n') if size_x is None: size_x = len(max(sentence_list, key=len)) + 4 # Add space for the input field if is_input: size_x += input_size if size_y is None: size_y = len(sentence_list) + 4 screen_x = self.screen.getmaxyx()[1] screen_y = self.screen.getmaxyx()[0] if size_x > screen_x or size_y > screen_y: # No size to display the popup => abort return False pos_x = int((screen_x - size_x) / 2) pos_y = int((screen_y - size_y) / 2) # Create the popup popup = curses.newwin(size_y, size_x, pos_y, pos_x) # Fill the popup popup.border() # Add the message for y, m in enumerate(message.split('\n')): popup.addnstr(2 + y, 2, m, len(m)) if is_input and not WINDOWS: # Create a subwindow for the text field subpop = popup.derwin(1, input_size, 2, 2 + len(m)) subpop.attron(self.colors_list['FILTER']) # Init the field with the current value if input_value is not None: subpop.addnstr(0, 0, input_value, len(input_value)) # Display the popup popup.refresh() subpop.refresh() # Create the textbox inside the subwindow self.set_cursor(2) self.flash_cursor() textbox = GlancesTextbox(subpop, insert_mode=False) textbox.edit() self.set_cursor(0) self.no_flash_cursor() if textbox.gather() != '': logger.debug("User enters the following string: %s" % textbox.gather()) return textbox.gather()[:-1] else: logger.debug("User enters an empty string") return None else: # Display the popup popup.refresh() curses.napms(duration * 1000) return True
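# Example call sites for display_popup() (the messages and values here are
# illustrative, not taken from the surrounding code): a timed informational
# popup, then an input popup whose result may be None for an empty field.
#
#   self.display_popup('Warning: high CPU usage', duration=2)
#   new_filter = self.display_popup('Process filter pattern: ',
#                                   is_input=True,
#                                   input_value=glances_processes.process_filter)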
def get_process_curses_data(self, p, first, args): """Get curses data to display for a process. - p is the process to display - first is a tag=True if the process is the first on the list """ ret = [self.curse_new_line()] # CPU if 'cpu_percent' in p and p[ 'cpu_percent'] is not None and p['cpu_percent'] != '': if args.disable_irix and self.nb_log_core != 0: msg = self.layout_stat['cpu'].format(p['cpu_percent'] / float(self.nb_log_core)) else: msg = self.layout_stat['cpu'].format(p['cpu_percent']) alert = self.get_alert( p['cpu_percent'], highlight_zero=False, is_max=(p['cpu_percent'] == self.max_values['cpu_percent']), header="cpu") ret.append(self.curse_add_line(msg, alert)) else: msg = self.layout_header['cpu'].format('?') ret.append(self.curse_add_line(msg)) # MEM if 'memory_percent' in p and p[ 'memory_percent'] is not None and p['memory_percent'] != '': msg = self.layout_stat['mem'].format(p['memory_percent']) alert = self.get_alert( p['memory_percent'], highlight_zero=False, is_max=( p['memory_percent'] == self.max_values['memory_percent']), header="mem") ret.append(self.curse_add_line(msg, alert)) else: msg = self.layout_header['mem'].format('?') ret.append(self.curse_add_line(msg)) # VMS/RSS if 'memory_info' in p and p[ 'memory_info'] is not None and p['memory_info'] != '': # VMS msg = self.layout_stat['virt'].format( self.auto_unit(p['memory_info'][1], low_precision=False)) ret.append(self.curse_add_line(msg, optional=True)) # RSS msg = self.layout_stat['res'].format( self.auto_unit(p['memory_info'][0], low_precision=False)) ret.append(self.curse_add_line(msg, optional=True)) else: msg = self.layout_header['virt'].format('?') ret.append(self.curse_add_line(msg)) msg = self.layout_header['res'].format('?') ret.append(self.curse_add_line(msg)) # PID msg = self.layout_stat['pid'].format(p['pid'], width=self.__max_pid_size()) ret.append(self.curse_add_line(msg)) # USER if 'username' in p: # docker internal users are displayed as ints only, therefore str() # Correct issue #886 on Windows OS msg = self.layout_stat['user'].format(str(p['username'])[:9]) ret.append(self.curse_add_line(msg)) else: msg = self.layout_header['user'].format('?') ret.append(self.curse_add_line(msg)) # TIME+ try: # Sum user and system time user_system_time = p['cpu_times'][0] + p['cpu_times'][1] except (OverflowError, TypeError) as e: # Catch OverflowError on some Amazon EC2 server # See https://github.com/nicolargo/glances/issues/87 # Also catch TypeError on macOS # See: https://github.com/nicolargo/glances/issues/622 # logger.debug("Cannot get TIME+ ({})".format(e)) msg = self.layout_header['time'].format('?') ret.append(self.curse_add_line(msg, optional=True)) else: hours, minutes, seconds = seconds_to_hms(user_system_time) if hours > 99: msg = '{:<7}h'.format(hours) elif 0 < hours < 100: msg = '{}h{}:{}'.format(hours, minutes, seconds) else: msg = '{}:{}'.format(minutes, seconds) msg = self.layout_stat['time'].format(msg) if hours > 0: ret.append( self.curse_add_line(msg, decoration='CPU_TIME', optional=True)) else: ret.append(self.curse_add_line(msg, optional=True)) # THREAD if 'num_threads' in p: num_threads = p['num_threads'] if num_threads is None: num_threads = '?' msg = self.layout_stat['thread'].format(num_threads) ret.append(self.curse_add_line(msg)) else: msg = self.layout_header['thread'].format('?') ret.append(self.curse_add_line(msg)) # NICE if 'nice' in p: nice = p['nice'] if nice is None: nice = '?' 
msg = self.layout_stat['nice'].format(nice) ret.append( self.curse_add_line(msg, decoration=self.get_nice_alert(nice))) else: msg = self.layout_header['nice'].format('?') ret.append(self.curse_add_line(msg)) # STATUS if 'status' in p: status = p['status'] msg = self.layout_stat['status'].format(status) if status == 'R': ret.append(self.curse_add_line(msg, decoration='STATUS')) else: ret.append(self.curse_add_line(msg)) else: msg = self.layout_header['status'].format('?') ret.append(self.curse_add_line(msg)) # IO read/write if 'io_counters' in p and p['io_counters'][ 4] == 1 and p['time_since_update'] != 0: # Display rate if stats is available and io_tag ([4]) == 1 # IO read io_rs = int((p['io_counters'][0] - p['io_counters'][2]) / p['time_since_update']) if io_rs == 0: msg = self.layout_stat['ior'].format("0") else: msg = self.layout_stat['ior'].format( self.auto_unit(io_rs, low_precision=True)) ret.append(self.curse_add_line(msg, optional=True, additional=True)) # IO write io_ws = int((p['io_counters'][1] - p['io_counters'][3]) / p['time_since_update']) if io_ws == 0: msg = self.layout_stat['iow'].format("0") else: msg = self.layout_stat['iow'].format( self.auto_unit(io_ws, low_precision=True)) ret.append(self.curse_add_line(msg, optional=True, additional=True)) else: msg = self.layout_header['ior'].format("?") ret.append(self.curse_add_line(msg, optional=True, additional=True)) msg = self.layout_header['iow'].format("?") ret.append(self.curse_add_line(msg, optional=True, additional=True)) # Command line # If no command line for the process is available, fallback to # the bare process name instead if 'cmdline' in p: cmdline = p['cmdline'] else: cmdline = '?' try: if cmdline: path, cmd, arguments = split_cmdline(cmdline) if os.path.isdir(path) and not args.process_short_name: msg = self.layout_stat['command'].format(path) + os.sep ret.append(self.curse_add_line(msg, splittable=True)) ret.append( self.curse_add_line(cmd, decoration='PROCESS', splittable=True)) else: msg = self.layout_stat['command'].format(cmd) ret.append( self.curse_add_line(msg, decoration='PROCESS', splittable=True)) if arguments: msg = ' ' + self.layout_stat['command'].format(arguments) ret.append(self.curse_add_line(msg, splittable=True)) else: msg = self.layout_stat['name'].format(p['name']) ret.append(self.curse_add_line(msg, splittable=True)) except (TypeError, UnicodeEncodeError) as e: # Avoid crash after running fine for several hours #1335 logger.debug("Can not decode command line '{}' ({})".format( cmdline, e)) ret.append(self.curse_add_line('', splittable=True)) # Add extended stats but only for the top processes if first and 'extended_stats' in p and args.enable_process_extended: # Left padding xpad = ' ' * 13 # First line is CPU affinity if 'cpu_affinity' in p and p['cpu_affinity'] is not None: ret.append(self.curse_new_line()) msg = xpad + 'CPU affinity: ' + str(len( p['cpu_affinity'])) + ' cores' ret.append(self.curse_add_line(msg, splittable=True)) # Second line is memory info if 'memory_info' in p and \ p['memory_info'] is not None: ret.append(self.curse_new_line()) msg = '{}Memory info: {}'.format(xpad, p['memory_info']) if 'memory_swap' in p and p['memory_swap'] is not None: msg += ' swap ' + self.auto_unit(p['memory_swap'], low_precision=False) ret.append(self.curse_add_line(msg, splittable=True)) # Third line is for open files/network sessions msg = '' if 'num_threads' in p and p['num_threads'] is not None: msg += str(p['num_threads']) + ' threads ' if 'num_fds' in p and p['num_fds'] is not None: msg
+= str(p['num_fds']) + ' files ' if 'num_handles' in p and p['num_handles'] is not None: msg += str(p['num_handles']) + ' handles ' if 'tcp' in p and p['tcp'] is not None: msg += str(p['tcp']) + ' TCP ' if 'udp' in p and p['udp'] is not None: msg += str(p['udp']) + ' UDP' if msg != '': ret.append(self.curse_new_line()) msg = xpad + 'Open: ' + msg ret.append(self.curse_add_line(msg, splittable=True)) # Fourth line is IO nice level (only Linux and Windows OS) if 'ionice' in p and \ p['ionice'] is not None \ and hasattr(p['ionice'], 'ioclass'): ret.append(self.curse_new_line()) msg = xpad + 'IO nice: ' k = 'Class is ' v = p['ionice'].ioclass # Linux: The scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle. # Windows: On Windows only ioclass is used and it can be set to 2 (normal), 1 (low) or 0 (very low). if WINDOWS: if v == 0: msg += k + 'Very Low' elif v == 1: msg += k + 'Low' elif v == 2: msg += 'No specific I/O priority' else: msg += k + str(v) else: if v == 0: msg += 'No specific I/O priority' elif v == 1: msg += k + 'Real Time' elif v == 2: msg += k + 'Best Effort' elif v == 3: msg += k + 'IDLE' else: msg += k + str(v) # value is a number which goes from 0 to 7. # The higher the value, the lower the I/O priority of the process. if hasattr(p['ionice'], 'value') and p['ionice'].value != 0: msg += ' (value %s/7)' % str(p['ionice'].value) ret.append(self.curse_add_line(msg, splittable=True)) return ret
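# seconds_to_hms() is a Glances helper not included in this excerpt; below is
# a sketch consistent with how the TIME+ column is rendered above (an assumed
# implementation, not the shipped one):
def seconds_to_hms(input_seconds):
    """Convert a duration in seconds to an (hours, minutes, seconds) tuple."""
    minutes, seconds = divmod(int(input_seconds), 60)
    hours, minutes = divmod(minutes, 60)
    # Zero-pad the seconds so 65s renders as 1:05, not 1:5
    return hours, minutes, str(seconds).zfill(2)

# seconds_to_hms(3725) -> (1, 2, '05'), rendered above as '1h2:05'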
"""Battery plugin.""" from glances.logger import logger from glances.plugins.glances_plugin import GlancesPlugin # Batinfo library (optional; Linux-only) try: import batinfo except ImportError: logger.debug( "Batinfo library not found. Glances cannot grab battery info.") class Plugin(GlancesPlugin): """Glances battery capacity plugin. stats is a list """ def __init__(self, args=None): """Init the plugin.""" super(Plugin, self).__init__(args=args) # Init the sensor class self.glancesgrabbat = GlancesGrabBat() # We do not want to display the stat in a dedicated area self.display_curse = False
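# GlancesGrabBat is not part of this excerpt. A minimal sketch of the grab it
# would perform, assuming batinfo's batteries()/update()/stat API (our reading
# of the library, to be checked against its documentation):
def grab_battery_percent():
    """Return the mean battery capacity in percent, or None if unavailable."""
    try:
        bat = batinfo.batteries()
        bat.update()
        capacities = [int(b.capacity) for b in bat.stat]
    except Exception as e:
        logger.debug("Cannot grab battery stats ({})".format(e))
        return None
    if not capacities:
        return None
    return sum(capacities) // len(capacities)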
def display(self, stats, cs_status=None): """Display stats on the screen. stats: Stats database to display cs_status: "None": standalone or server mode "Connected": Client is connected to a Glances server "SNMP": Client is connected to a SNMP server "Disconnected": Client is disconnected from the server Return: True if the stats have been displayed False if the help has been displayed """ # Init the internal line/column for Glances Curses self.init_line_column() # Update the stats messages ########################### # Get all the plugins but quicklook and processlist self.args.cs_status = cs_status __stat_display = self.__get_stat_display(stats, layer=cs_status) # Adapt number of processes to the available space max_processes_displayed = ( self.screen.getmaxyx()[0] - 11 - (0 if 'docker' not in __stat_display else self.get_stats_display_height(__stat_display["docker"])) - (0 if 'processcount' not in __stat_display else self.get_stats_display_height(__stat_display["processcount"])) - (0 if 'amps' not in __stat_display else self.get_stats_display_height(__stat_display["amps"])) - (0 if 'alert' not in __stat_display else self.get_stats_display_height(__stat_display["alert"]))) try: if self.args.enable_process_extended: max_processes_displayed -= 4 except AttributeError: pass if max_processes_displayed < 0: max_processes_displayed = 0 if (glances_processes.max_processes is None or glances_processes.max_processes != max_processes_displayed): logger.debug("Set number of displayed processes to {}".format( max_processes_displayed)) glances_processes.max_processes = max_processes_displayed # Get the processlist __stat_display["processlist"] = stats.get_plugin( 'processlist').get_stats_display(args=self.args) # Display the stats on the curses interface ########################################### # Help screen (on top of the other stats) if self.args.help_tag: # Display the stats... self.display_plugin( stats.get_plugin('help').get_stats_display(args=self.args)) # ...
and exit return False # ===================================== # Display first line (system+ip+uptime) # Optionally: Cloud on second line # ===================================== self.__display_header(__stat_display) # ============================================================== # Display second line (<SUMMARY>+CPU|PERCPU+<GPU>+LOAD+MEM+SWAP) # ============================================================== self.__display_top(__stat_display, stats) # ================================================================== # Display left sidebar (NETWORK+PORTS+DISKIO+FS+SENSORS+Current time) # ================================================================== self.__display_left(__stat_display) # ==================================== # Display right stats (process and co) # ==================================== self.__display_right(__stat_display) # ===================== # Other popup messages # ===================== # Display edit filter popup # Only in standalone mode (cs_status is None) if self.edit_filter and cs_status is None: new_filter = self.display_popup( 'Process filter pattern: \n\n' + 'Examples:\n' + '- python\n' + '- .*python.*\n' + '- /usr/lib.*\n' + '- name:.*nautilus.*\n' + '- cmdline:.*glances.*\n' + '- username:nicolargo\n' + '- username:^root ', is_input=True, input_value=glances_processes.process_filter_input) glances_processes.process_filter = new_filter elif self.edit_filter and cs_status is not None: self.display_popup( 'Process filter only available in standalone mode') self.edit_filter = False # Display graph generation popup if self.args.generate_graph: self.display_popup('Generate graph in {}'.format( self.args.export_graph_path)) return True