def log_and_exit(self, msg=''):
    """Log a critical message and exit, unless running in browser mode.

    In browser mode the message is only logged as an error so the user
    can pick another server; otherwise the process exits with code 2.
    """
    if self.return_to_browser:
        logger.error(msg)
    else:
        logger.critical(msg)
        sys.exit(2)
def export(self, name, columns, points):
    """Write one measurement (name, columns, points) to InfluxDB."""
    logger.debug("Export {0} stats to InfluxDB".format(name))
    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name
    # Strip the leading "/." marker from column names
    new_col = [col[2:] if col[0:2] == "/." else col for col in columns]
    # Shape the payload for the client API version in use
    if self.version == INFLUXDB_09:
        data = [{'measurement': name,
                 'tags': self.tags,
                 'fields': dict(zip(new_col, points))}]
    else:
        data = [{'name': name, 'columns': new_col, 'points': [points]}]
    # Write input to the InfluxDB database
    try:
        self.client.write_points(data)
    except Exception as e:
        logger.error("Cannot export {0} stats to InfluxDB ({1})".format(name, e))
def __set_monitor_list(self, section, key):
    """Init the monitored processes list.

    The list is defined in the Glances configuration file, one group of
    "list_<n>_<field>" options per monitored entry in *section*.

    Note: the ``key`` parameter is immediately rebound below; it is kept
    only for backward signature compatibility.
    """
    for slot in range(1, self.__monitor_list_max_size + 1):
        value = {}
        key = "list_" + str(slot) + "_"
        try:
            description = self.config.get_raw_option(section, key + "description")
            regex = self.config.get_raw_option(section, key + "regex")
            command = self.config.get_raw_option(section, key + "command")
            countmin = self.config.get_raw_option(section, key + "countmin")
            countmax = self.config.get_raw_option(section, key + "countmax")
        except Exception as e:
            # Fix: removed a dead `pass` statement that followed this log
            # call; the `else` clause below already skips bad entries.
            logger.error("Cannot read monitored list: {0}".format(e))
        else:
            if description is not None and regex is not None:
                # Build the new item
                value["description"] = description
                # Only accept entries whose regex compiles
                try:
                    re.compile(regex)
                except Exception:
                    continue
                else:
                    value["regex"] = regex
                value["command"] = command
                value["countmin"] = countmin
                value["countmax"] = countmax
                value["count"] = None
                value["result"] = None
                # Add the item to the list
                self.__monitor_list.append(value)
def __init__(self, hostname, args=None):
    """Announce the Glances server on the LAN through zeroconf.

    hostname: service name advertised for this server.
    args: parsed command-line options; args.bind_address and args.port
    are read (assumes callers always pass args -- TODO confirm).
    """
    if zeroconf_tag:
        zeroconf_bind_address = args.bind_address
        try:
            self.zeroconf = Zeroconf()
        except socket.error as e:
            # NOTE(review): execution continues even when Zeroconf() fails;
            # register_service() below would then raise AttributeError.
            logger.error("Cannot start zeroconf: {0}".format(e))
        try:
            # -B @ overwrite the dynamic IPv4 choice
            if zeroconf_bind_address == '0.0.0.0':
                zeroconf_bind_address = self.find_active_ip_address()
        except KeyError:
            # Issue #528 (no network interface available)
            pass
        print("Announce the Glances server on the LAN (using {0} IP address)".format(zeroconf_bind_address))
        # Register the service description, then publish it on the LAN
        self.info = ServiceInfo(
            zeroconf_type,
            '{0}:{1}.{2}'.format(hostname, args.port, zeroconf_type),
            address=socket.inet_aton(zeroconf_bind_address),
            port=args.port,
            weight=0,
            priority=0,
            properties={},
            server=hostname)
        self.zeroconf.register_service(self.info)
    else:
        logger.error("Cannot announce Glances server on the network: zeroconf library not found.")
def get_docker_cpu(self, id):
    """Return the container CPU usage by reading /sys/fs/cgroup/...

    Input: id is the full container id
    Output: a dict {'total': 1.49, 'user': 0.65, 'system': 0.84}
    (values in seconds; partial or empty dict when stats are unreadable).
    """
    ret = {}
    # Read the per-container cpuacct stats exposed by the cgroup fs
    try:
        with open('/sys/fs/cgroup/cpuacct/docker/' + id + '/cpuacct.stat', 'r') as f:
            for line in f:
                m = re.search(r"(system|user)\s+(\d+)", line)
                if m:
                    ret[m.group(1)] = int(m.group(2))
    except IOError as e:
        logger.error("Can not grab container CPU stat ({0})".format(e))
        return ret
    # Get the user ticks
    ticks = self.get_user_ticks()
    # Fix: use .get() -- the original indexed ret["system"]/ret["user"]
    # directly and raised KeyError on a truncated cpuacct.stat file.
    if isinstance(ret.get("system"), numbers.Number) and \
            isinstance(ret.get("user"), numbers.Number):
        ret["total"] = ret["system"] + ret["user"]
    # Convert ticks to seconds
    for k in ret:
        ret[k] = float(ret[k]) / ticks
    # Return the stats
    return ret
def log_and_exit(self, msg=''):
    """Log the message; terminate with exit code 2 unless in browser mode."""
    if self.return_to_browser:
        # Browser mode: stay alive, just report the error
        logger.error(msg)
        return
    logger.critical(msg)
    sys.exit(2)
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    logger.debug("Export {0} stats to InfluxDB".format(name))
    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name
    # Create DB input: strip the leading "/." from column names
    new_col = []
    for col in columns:
        if col[0:2] == "/.":
            new_col.append(col[2:])
        else:
            new_col.append(col)
    if self.version == INFLUXDB_09:
        data = [{'measurement': name,
                 'tags': self.tags,
                 'fields': dict(zip(new_col, points))}]
    else:
        data = [{'name': name, 'columns': new_col, 'points': [points]}]
    # Write input to the InfluxDB database
    # Fix: removed leftover debug output (`from pprint import pprint;
    # pprint(data); pprint(self.tags)`) that wrote to stdout on every export.
    try:
        self.client.write_points(data)
    except Exception as e:
        logger.error("Cannot export {0} stats to InfluxDB ({1})".format(name, e))
def __set_monitor_list(self, section, key):
    """Init the monitored processes list.

    The list is defined in the Glances configuration file.
    """
    for index in range(1, self.__monitor_list_max_size + 1):
        key = "list_" + str(index) + "_"
        # Read the five configuration fields for this slot
        try:
            description = self.config.get_value(section, key + 'description')
            regex = self.config.get_value(section, key + 'regex')
            command = self.config.get_value(section, key + 'command')
            countmin = self.config.get_value(section, key + 'countmin')
            countmax = self.config.get_value(section, key + 'countmax')
        except Exception as e:
            logger.error("Cannot read monitored list: {0}".format(e))
            continue
        # description and regex are mandatory
        if description is None or regex is None:
            continue
        # Skip entries whose regex does not compile
        try:
            re.compile(regex)
        except Exception:
            continue
        value = {"description": description,
                 "regex": regex,
                 "command": command,
                 "countmin": countmin,
                 "countmax": countmax,
                 "count": None,
                 "result": None}
        # Add the item to the list
        self.__monitor_list.append(value)
def __init__(self, hostname, args=None):
    """Announce the Glances server on the LAN through zeroconf.

    hostname: service name advertised for this server.
    args: parsed command-line options; args.bind_address and args.port
    are read (assumes callers always pass args -- TODO confirm).
    """
    if zeroconf_tag:
        zeroconf_bind_address = args.bind_address
        try:
            self.zeroconf = Zeroconf()
        except socket.error as e:
            # NOTE(review): execution continues even when Zeroconf() fails;
            # register_service() below would then raise AttributeError.
            logger.error("Cannot start zeroconf: {0}".format(e))
        # XXX FreeBSD: Segmentation fault (core dumped)
        # -- https://bitbucket.org/al45tair/netifaces/issues/15
        if not is_freebsd:
            try:
                # -B @ overwrite the dynamic IPv4 choice
                if zeroconf_bind_address == '0.0.0.0':
                    zeroconf_bind_address = self.find_active_ip_address()
            except KeyError:
                # Issue #528 (no network interface available)
                pass
        print("Announce the Glances server on the LAN (using {0} IP address)".format(zeroconf_bind_address))
        # Register the service description, then publish it on the LAN
        self.info = ServiceInfo(
            zeroconf_type,
            '{0}:{1}.{2}'.format(hostname, args.port, zeroconf_type),
            address=socket.inet_aton(zeroconf_bind_address),
            port=args.port,
            weight=0,
            priority=0,
            properties={},
            server=hostname)
        self.zeroconf.register_service(self.info)
    else:
        logger.error("Cannot announce Glances server on the network: zeroconf library not found.")
def load(self, config):
    """Load the server list from the configuration file.

    Return a list of server dicts (possibly empty); each entry carries
    name/port/alias from the config plus ip, key, status and type.
    """
    server_list = []
    if config is None:
        logger.warning("No configuration file available. Cannot load server list.")
    elif not config.has_section(self._section):
        logger.warning("No [%s] section in the configuration file. Cannot load server list." % self._section)
    else:
        logger.info("Start reading the [%s] section in the configuration file" % self._section)
        for i in range(1, 256):
            new_server = {}
            postfix = 'server_%s_' % str(i)
            # Read the server name (mandatory), port and alias
            for s in ['name', 'port', 'alias']:
                new_server[s] = config.get_raw_option(self._section, '%s%s' % (postfix, s))
            if new_server['name'] is not None:
                # Manage optionnal information
                if new_server['port'] is None:
                    # Fix: default as a *string*, like config-provided
                    # values; the int 61209 made the key concatenation
                    # below raise TypeError.
                    new_server['port'] = '61209'
                new_server['username'] = '******'
                new_server['password'] = ''
                try:
                    new_server['ip'] = gethostbyname(new_server['name'])
                except gaierror as e:
                    logger.error("Cannot get IP address for server %s (%s)" % (new_server['name'], e))
                    continue
                new_server['key'] = new_server['name'] + ':' + new_server['port']
                # Default status is 'UNKNOWN'
                new_server['status'] = 'UNKNOWN'
                # Server type is 'STATIC'
                new_server['type'] = 'STATIC'
                # Add the server to the list
                logger.debug("Add server %s to the static list" % new_server['name'])
                server_list.append(new_server)
        # Server list loaded
        logger.info("%s server(s) loaded from the configuration file" % len(server_list))
        logger.debug("Static server list: %s" % server_list)
    return server_list
def update(self):
    """Refresh every plugin's stats through SNMP."""
    for plugin_name, plugin in self._plugins.items():
        # Switch the plugin input method to SNMP before refreshing
        plugin.set_input('snmp', self.system_name)
        try:
            plugin.update()
        except Exception as e:
            logger.error("Update {0} failed: {1}".format(plugin_name, e))
def get_item_key(self, item):
    """Return the value of the item 'key'.

    item is a dict whose 'key' entry names the field acting as the key.
    Return that field's value (first element if it is a list), or None
    when the lookup fails.
    """
    try:
        ret = item[item['key']]
    except KeyError:
        logger.error("No 'key' available in {0}".format(item))
        # Fix: the original fell through and referenced the unbound
        # `ret`, raising UnboundLocalError; return None instead.
        return None
    if isinstance(ret, list):
        return ret[0]
    return ret
def export(self, name, columns, points):
    """Export the stats to the Statsd server."""
    for i, column in enumerate(columns):
        value = points[i]
        # Statsd gauges only accept numeric values
        if not isinstance(value, Number):
            continue
        metric = '{0}.{1}'.format(name, column)
        try:
            self.client.gauge(metric, value)
        except Exception as e:
            logger.error("Can not export stats to Statsd (%s)" % e)
def export(self, name, columns, points):
    """Export the stats to the Statsd server"""
    for i in range(len(columns)):
        value = points[i]
        # Skip non-numeric values: gauges must be numbers
        if isinstance(value, Number):
            metric = '{0}.{1}'.format(name, columns[i])
            try:
                self.client.gauge(metric, value)
            except Exception as e:
                logger.error("Can not export stats to Statsd (%s)" % e)
def __init__(self, bind_address, bind_port=61209, requestHandler=GlancesXMLRPCHandler):
    """Resolve the bind address, then init the underlying XML-RPC server."""
    try:
        resolved = socket.getaddrinfo(bind_address, bind_port)
        self.address_family = resolved[0][0]
    except socket.error as e:
        logger.error("Couldn't open socket: {0}".format(e))
        sys.exit(1)
    SimpleXMLRPCServer.__init__(self, (bind_address, bind_port), requestHandler)
def export(self, name, columns, points):
    """Export the stats to the OpenTSDB server."""
    for i in range(len(columns)):
        # Only numeric values can be sent as metrics
        if not isinstance(points[i], Number):
            continue
        metric = '{0}.{1}.{2}'.format(self.prefix, name, columns[i])
        try:
            self.client.send(metric, points[i], **self.tags)
        except Exception as e:
            logger.error("Can not export stats %s to OpenTSDB (%s)" % (name, e))
    logger.debug("Export {0} stats to OpenTSDB".format(name))
def remove_server(self, name):
    """Remove the server(s) with the given key from the servers list.

    Fix: the original removed from self._server_list while iterating
    it, which silently skips the element following each removal.
    """
    matches = [srv for srv in self._server_list if srv['key'] == name]
    for srv in matches:
        try:
            self._server_list.remove(srv)
            logger.debug("Remove server %s from the list" % name)
            logger.debug("Updated servers list (%s servers): %s" % (
                len(self._server_list), self._server_list))
        except ValueError:
            logger.error("Cannot remove server %s from the list" % name)
def export(self, name, columns, points):
    """Write the points to the InfluxDB server"""
    # Legacy (pre-0.9) payload: one series per call
    payload = [{
        "name": name,
        "columns": columns,
        "points": [points],
    }]
    try:
        self.client.write_points(payload)
    except Exception as e:
        logger.error("Can not export stats to InfluxDB (%s)" % e)
def remove_server(self, name):
    """Remove a server from the dict

    Fix: iterate over a shallow copy -- removing from the list being
    iterated (as the original code did) skips the next element.
    """
    for i in list(self._server_list):
        if i['key'] == name:
            try:
                self._server_list.remove(i)
                logger.debug("Remove server %s from the list" % name)
                logger.debug("Updated servers list (%s servers): %s" % (
                    len(self._server_list), self._server_list))
            except ValueError:
                logger.error("Cannot remove server %s from the list" % name)
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    # Prepend the configured prefix, if any, to the measurement name
    if self.prefix is not None:
        name = '.'.join((self.prefix, name))
    payload = [{'name': name, 'columns': columns, 'points': [points]}]
    try:
        self.client.write_points(payload)
    except Exception as e:
        logger.error("Can not export stats to InfluxDB (%s)" % e)
def process_filter(self, value):
    """Set the process filter (a regex string, or None to disable it)."""
    logger.info("Set process filter to {0}".format(value))
    self._process_filter = value
    if value is None:
        self._process_filter_re = None
        return
    try:
        compiled = re.compile(value)
        logger.debug("Process filter regex compilation OK: {0}".format(self.process_filter))
    except Exception:
        logger.error("Cannot compile process filter regex: {0}".format(value))
        self._process_filter_re = None
    else:
        self._process_filter_re = compiled
def export(self, name, columns, points):
    """Write the points in RabbitMQ."""
    # Build a "k=v, k=v" body starting with host/name/timestamp fields
    parts = ['hostname=' + self.hostname + ', name=' + name +
             ', dateinfo=' + datetime.datetime.utcnow().isoformat()]
    for i in range(len(columns)):
        # Only numeric values are exported
        if isinstance(points[i], Number):
            parts.append(columns[i] + "=" + str(points[i]))
    data = ", ".join(parts)
    logger.debug(data)
    try:
        self.client.basic_publish(exchange='', routing_key=self.rabbitmq_queue, body=data)
    except Exception as e:
        logger.error("Can not export stats to RabbitMQ (%s)" % e)
def save_password(self, hashed_password):
    """Save the hashed password to the Glances folder."""
    # Create the Glances folder on first use
    if not os.path.exists(self.password_path):
        try:
            os.makedirs(self.password_path)
        except OSError as e:
            logger.error("Cannot create Glances directory: {0}".format(e))
            return
    # Create/overwrite the password file
    with open(self.password_filepath, 'w') as pwd_file:
        pwd_file.write(hashed_password)
def export(self, name, columns, points):
    """Write the points to the Elasticsearch server.

    (The original docstring said "RabbitMQ", but the code POSTs JSON to
    self.uri and the error message below names Elasticsearch.)
    Only numeric points are exported; values are stringified.
    """
    data = {}
    data['hostname'] = self.hostname
    for i in range(0, len(columns)):
        if not isinstance(points[i], Number):
            continue
        else:
            data[columns[i]] = str(points[i])
    try:
        requests.post(self.uri, data=json.dumps(data))
    except Exception as e:
        logger.error("Can not export stats to Elasticsearch (%s)" % e)
def connect(self, version=None):
    """Connect to the Docker server.

    Return a docker.Client instance, or None when the connection fails
    (docker lib missing, daemon unreachable, API error).
    """
    # Init connection to the Docker API
    try:
        if version is None:
            ret = docker.Client(base_url='unix://var/run/docker.sock')
        else:
            ret = docker.Client(base_url='unix://var/run/docker.sock',
                                version=version)
    except NameError:
        # docker lib not found
        return None
    try:
        ret.version()
    except requests.exceptions.ConnectionError as e:
        # Connection error (Docker not detected)
        # Let this message in debug mode
        logger.debug("Can't connect to the Docker server (%s)" % e)
        return None
    except docker.errors.APIError as e:
        if version is None:
            # API error (Version mismatch ?)
            logger.debug("Docker API error (%s)" % e)
            # Try the connection with the server version
            import re
            # Fix: raw string -- the original pattern relied on invalid
            # escape sequences ("\:", "\ ") that Python 3 deprecates.
            version = re.search(r'server\:\ (.*)\)\".*\)', str(e))
            if version:
                logger.debug("Try connection with Docker API version %s" % version.group(1))
                ret = self.connect(version=version.group(1))
            else:
                logger.debug("Can not retreive Docker server version")
                ret = None
        else:
            # API error
            logger.error("Docker API error (%s)" % e)
            ret = None
    except Exception as e:
        # Other exceptions: Docker not detected
        logger.error("Can't connect to the Docker server (%s)" % e)
        ret = None
    # Log an info if Docker plugin is disabled
    if ret is None:
        logger.debug("Docker plugin is disable because an error has been detected")
    return ret
def process_filter(self, value):
    """Set the process filter."""
    logger.info("Set process filter to {0}".format(value))
    self._process_filter = value
    if value is not None:
        # Pre-compile the regex; disable the filter if it is invalid
        try:
            regex = re.compile(value)
            logger.debug("Process filter regex compilation OK: {0}".format(self.process_filter))
        except Exception:
            logger.error("Cannot compile process filter regex: {0}".format(value))
            regex = None
        self._process_filter_re = regex
    else:
        self._process_filter_re = None
def get_stats_value(self, item, value):
    """Return the stats object for a specific item=value in JSON format.

    Stats should be a list of dict (processlist, network...)
    """
    if not isinstance(self.stats, list):
        return None
    # Numeric-looking values are matched as integers
    if value.isdigit():
        value = int(value)
    try:
        return json.dumps({value: [i for i in self.stats if i[item] == value]})
    except (KeyError, ValueError) as e:
        logger.error("Cannot get item({0})=value({1}) ({2})".format(item, value, e))
        return None
def load(self):
    """Load a config file from the list of paths, if it exists."""
    for config_file in self.get_config_paths():
        # Skip missing or empty candidates
        if not os.path.isfile(config_file) or os.path.getsize(config_file) == 0:
            continue
        try:
            if is_py3:
                self.parser.read(config_file, encoding='utf-8')
            else:
                self.parser.read(config_file)
            logger.info("Read configuration file '{0}'".format(config_file))
        except UnicodeDecodeError as e:
            logger.error("Cannot decode configuration file '{0}': {1}".format(config_file, e))
            sys.exit(1)
        # Save the loaded configuration file path (issue #374)
        self._loaded_config_file = config_file
        break
def __init__(self, args=None):
    """Start the autodiscover client (watch for Glances servers via zeroconf).

    Sets self.zeroconf_enable_tag to True only when the Zeroconf socket
    and the service browser could both be started.
    """
    if zeroconf_tag:
        logger.info("Init autodiscover mode (Zeroconf protocol)")
        try:
            self.zeroconf = Zeroconf()
        except socket.error as e:
            logger.error("Cannot start Zeroconf (%s)" % e)
            self.zeroconf_enable_tag = False
        else:
            # Browse the LAN for services of type `zeroconf_type`;
            # discovered servers are reported to the listener.
            self.listener = GlancesAutoDiscoverListener()
            self.browser = ServiceBrowser(
                self.zeroconf, zeroconf_type, self.listener)
            self.zeroconf_enable_tag = True
    else:
        logger.error("Cannot start autodiscover mode (Zeroconf lib is not installed)")
        self.zeroconf_enable_tag = False
def connect(self, version=None):
    """Connect to the Docker server

    Return a docker.Client instance, or None when the connection fails
    (docker lib missing, daemon unreachable, API error).
    """
    # Init connection to the Docker API
    try:
        if version is None:
            ret = docker.Client(base_url='unix://var/run/docker.sock')
        else:
            ret = docker.Client(base_url='unix://var/run/docker.sock',
                                version=version)
    except NameError:
        # docker lib not found
        return None
    try:
        ret.version()
    except requests.exceptions.ConnectionError as e:
        # Connection error (Docker not detected)
        # Let this message in debug mode
        logger.debug("Can't connect to the Docker server (%s)" % e)
        return None
    except docker.errors.APIError as e:
        if version is None:
            # API error (Version mismatch ?)
            logger.debug("Docker API error (%s)" % e)
            # Try the connection with the server version
            import re
            # Fix: use a raw string; the original pattern contained the
            # invalid escape sequences "\:" and "\ " (Py3 deprecation).
            version = re.search(r'server\:\ (.*)\)\".*\)', str(e))
            if version:
                logger.debug("Try connection with Docker API version %s" % version.group(1))
                ret = self.connect(version=version.group(1))
            else:
                logger.debug("Can not retreive Docker server version")
                ret = None
        else:
            # API error
            logger.error("Docker API error (%s)" % e)
            ret = None
    except Exception as e:
        # Other exceptions: Docker not detected
        logger.error("Can't connect to the Docker server (%s)" % e)
        ret = None
    # Log an info if Docker plugin is disabled
    if ret is None:
        logger.debug("Docker plugin is disable because an error has been detected")
    return ret
def update(self):
    """Update sensors stats using the input method."""
    # Reset the stats
    self.reset()
    if self.input_method == 'local':
        self.stats = []

        def grab(fetch, sensor_type, error_msg):
            # Fetch one sensor family; log and skip it on failure.
            try:
                data = self.__set_type(fetch(), sensor_type)
            except Exception as e:
                logger.error(error_msg % e)
            else:
                self.stats.extend(data)

        grab(lambda: self.glancesgrabsensors.get('temperature_core'),
             'temperature_core', "Cannot grab sensors temperatures (%s)")
        grab(lambda: self.glancesgrabsensors.get('fan_speed'),
             'fan_speed', "Cannot grab FAN speed (%s)")
        grab(lambda: self.hddtemp_plugin.update(),
             'temperature_hdd', "Cannot grab HDD temperature (%s)")
        grab(lambda: self.batpercent_plugin.update(),
             'battery', "Cannot grab battery percent (%s)")
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # No standard:
        # http://www.net-snmp.org/wiki/index.php/Net-SNMP_and_lm-sensors_on_Ubuntu_10.04
        pass
    # Update the view
    self.update_views()
    return self.stats
def get_stats_value(self, item, value):
    """Return the stats object for a specific item=value (in JSON format)

    Stats should be a list of dict (processlist, network...)
    """
    if type(self.stats) is not list:
        return None
    # Digit strings are matched as integers
    value = int(value) if value.isdigit() else value
    try:
        matching = {value: [i for i in self.stats if i[item] == value]}
        return json.dumps(matching)
    except (KeyError, ValueError) as e:
        logger.error("Cannot get item({0})=value({1}) ({2})".format(item, value, e))
        return None
def get_docker_memory(self, id):
    """Return the container MEMORY usage by reading /sys/fs/cgroup/...

    Input: id is the full container id
    Output: a dict {'rss': 1015808, 'cache': 356352}
    (empty when the cgroup stats cannot be read).
    """
    usage = {}
    stat_file = '/sys/fs/cgroup/memory/docker/' + id + '/memory.stat'
    pattern = re.compile(r"(rss|cache)\s+(\d+)")
    try:
        with open(stat_file, 'r') as f:
            for line in f:
                match = pattern.search(line)
                if match:
                    usage[match.group(1)] = int(match.group(2))
    except IOError as e:
        logger.error("Can not grab container MEM stat ({0})".format(e))
    return usage
def update(self):
    """Update sensors stats using the input method."""
    # Reset the stats
    self.reset()
    if self.input_method == 'snmp':
        # Update stats using SNMP
        # No standard:
        # http://www.net-snmp.org/wiki/index.php/Net-SNMP_and_lm-sensors_on_Ubuntu_10.04
        pass
    elif self.input_method == 'local':
        self.stats = []
        # Core temperatures
        try:
            temperatures = self.__set_type(self.glancesgrabsensors.get('temperature_core'), 'temperature_core')
        except Exception as e:
            logger.error("Cannot grab sensors temperatures (%s)" % e)
        else:
            self.stats.extend(temperatures)
        # Fan speeds
        try:
            fans = self.__set_type(self.glancesgrabsensors.get('fan_speed'), 'fan_speed')
        except Exception as e:
            logger.error("Cannot grab FAN speed (%s)" % e)
        else:
            self.stats.extend(fans)
        # Disk temperatures (hddtemp daemon)
        try:
            hdd = self.__set_type(self.hddtemp_plugin.update(), 'temperature_hdd')
        except Exception as e:
            logger.error("Cannot grab HDD temperature (%s)" % e)
        else:
            self.stats.extend(hdd)
        # Battery level
        try:
            battery = self.__set_type(self.batpercent_plugin.update(), 'battery')
        except Exception as e:
            logger.error("Cannot grab battery percent (%s)" % e)
        else:
            self.stats.extend(battery)
    # Update the view
    self.update_views()
    return self.stats
def load(self, config):
    """Load the server list from the configuration file

    Return a (possibly empty) list of server dicts; each carries the
    configured name/port/alias plus ip, key, status and type.
    """
    server_list = []
    if config is None:
        logger.warning("No configuration file available. Cannot load server list.")
    elif not config.has_section(self._section):
        logger.warning("No [%s] section in the configuration file. Cannot load server list." % self._section)
    else:
        logger.info("Start reading the [%s] section in the configuration file" % self._section)
        for i in range(1, 256):
            new_server = {}
            postfix = 'server_%s_' % str(i)
            # Read the server name (mandatory), port and alias
            for s in ['name', 'port', 'alias']:
                new_server[s] = config.get_raw_option(self._section, '%s%s' % (postfix, s))
            if new_server['name'] is not None:
                # Manage optionnal information
                if new_server['port'] is None:
                    # Fix: the default must be a string, like any
                    # config-provided port; the original int 61209 made
                    # the key concatenation below raise TypeError.
                    new_server['port'] = '61209'
                new_server['username'] = '******'
                new_server['password'] = ''
                try:
                    new_server['ip'] = gethostbyname(new_server['name'])
                except gaierror as e:
                    logger.error("Cannot get IP address for server %s (%s)" % (new_server['name'], e))
                    continue
                new_server['key'] = new_server['name'] + ':' + new_server['port']
                # Default status is 'UNKNOWN'
                new_server['status'] = 'UNKNOWN'
                # Server type is 'STATIC'
                new_server['type'] = 'STATIC'
                # Add the server to the list
                logger.debug("Add server %s to the static list" % new_server['name'])
                server_list.append(new_server)
        # Server list loaded
        logger.info("%s server(s) loaded from the configuration file" % len(server_list))
        logger.debug("Static server list: %s" % server_list)
    return server_list
def get_stats_item(self, item):
    """Return the stats object for a specific item (in JSON format)

    Stats should be a list of dict (processlist, network...)
    Return None when the item cannot be extracted.
    """
    if not isinstance(self.stats, list):
        if isinstance(self.stats, dict):
            try:
                return json.dumps({item: self.stats[item]})
            except KeyError as e:
                logger.error("Cannot get item {0} ({1})".format(item, e))
        return None
    try:
        # Source:
        # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list
        # Fix: materialize the map() result -- on Python 3 json.dumps
        # cannot serialize a lazy map object (raised TypeError).
        return json.dumps({item: list(map(itemgetter(item), self.stats))})
    except (KeyError, ValueError) as e:
        logger.error("Cannot get item {0} ({1})".format(item, e))
        return None
def get_stats_item(self, item):
    """Return the stats object for a specific item in JSON format.

    Stats should be a list of dict (processlist, network...)
    Return None when the item cannot be extracted.
    """
    if isinstance(self.stats, dict):
        try:
            return json.dumps({item: self.stats[item]})
        except KeyError as e:
            logger.error("Cannot get item {0} ({1})".format(item, e))
            return None
    elif isinstance(self.stats, list):
        try:
            # Source:
            # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list
            # Fix: list() the map object so json.dumps can serialize it
            # on Python 3 (it is not JSON serializable as an iterator).
            return json.dumps({item: list(map(itemgetter(item), self.stats))})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item {0} ({1})".format(item, e))
            return None
    else:
        return None
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    logger.debug("Export {0} stats to InfluxDB".format(name))
    # Prepend the optional prefix to the measurement name
    if self.prefix is not None:
        name = self.prefix + '.' + name
    # Shape the payload for the client API version in use
    if self.version == INFLUXDB_09:
        payload = [{'measurement': name,
                    'tags': self.tags,
                    'fields': dict(zip(columns, points))}]
    else:
        payload = [{'name': name, 'columns': columns, 'points': [points]}]
    try:
        self.client.write_points(payload)
    except Exception as e:
        logger.error("Cannot export {0} stats to InfluxDB ({1})".format(name, e))
def run(self, stat_name, criticity, commands, mustache_dict=None):
    """Run the commands (in background).

    - stat_name: plugin_name (+ header)
    - criticity: criticity of the trigger
    - commands: a list of command line with optional {{mustache}}
    - mustache_dict: Plugin stats (can be use within {{mustache}})

    Return True if the commands have been ran.
    """
    if self.get(stat_name) == criticity:
        # Action already executed => Exit
        return False
    logger.debug("Run action {0} for {1} ({2}) with stats {3}".format(
        commands, stat_name, criticity, mustache_dict))
    # Run all actions in background
    for cmd in commands:
        # Render {{mustache}} placeholders when pystache is available
        cmd_full = pystache.render(cmd, mustache_dict) if pystache_tag else cmd
        logger.info("Action triggered for {0} ({1}): {2}".format(stat_name, criticity, cmd_full))
        logger.debug("Stats value for the trigger: {0}".format(mustache_dict))
        try:
            Popen(cmd_full, shell=True)
        except OSError as e:
            logger.error("Can't execute the action ({0})".format(e))
    # Remember we already fired for this stat/criticity pair
    self.set(stat_name, criticity)
    return True
def update(self):
    """Update Docker stats using the input method.

    Maintains one background grabber thread per running container
    (self.thread_list, keyed by container Id) and merges each thread's
    stats into the per-container dicts returned in self.stats.
    """
    # Reset stats
    self.reset()
    # Get the current Docker API client
    if not self.docker_client:
        # First time, try to connect to the server
        self.docker_client = self.connect()
        if self.docker_client is None:
            # Connection failed: disable the plugin for the whole session
            global docker_tag
            docker_tag = False
    # The Docker-py lib is mandatory
    if not docker_tag or (self.args is not None and self.args.disable_docker):
        return self.stats
    if self.input_method == 'local':
        # Update stats
        # Docker version
        # Exemple: {
        #     "KernelVersion": "3.16.4-tinycore64",
        #     "Arch": "amd64",
        #     "ApiVersion": "1.15",
        #     "Version": "1.3.0",
        #     "GitCommit": "c78088f",
        #     "Os": "linux",
        #     "GoVersion": "go1.3.3"
        # }
        try:
            self.stats['version'] = self.docker_client.version()
        except Exception as e:
            # Correct issue#649
            logger.error("{0} plugin - Cannot get Docker version ({1})".format(self.plugin_name, e))
            return self.stats
        # Container globals information
        # Example: [{u'Status': u'Up 36 seconds',
        #            u'Created': 1420378904,
        #            u'Image': u'nginx:1',
        #            u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443},
        #                       {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}],
        #            u'Command': u"nginx -g 'daemon off;'",
        #            u'Names': [u'/webstack_nginx_1'],
        #            u'Id': u'b0da859e84eb...'}]
        # Update current containers list
        try:
            self.stats['containers'] = self.docker_client.containers() or []
        except Exception as e:
            logger.error("{0} plugin - Cannot get containers list ({1})".format(self.plugin_name, e))
            return self.stats
        # Start new thread for new container
        for container in self.stats['containers']:
            if container['Id'] not in self.thread_list:
                # Thread did not exist in the internal dict
                # Create it and add it to the internal dict
                logger.debug("{0} plugin - Create thread for container {1}".format(self.plugin_name, container['Id'][:12]))
                t = ThreadDockerGrabber(self.docker_client, container['Id'])
                self.thread_list[container['Id']] = t
                t.start()
        # Stop threads for non-existing containers
        nonexisting_containers = list(set(self.thread_list.keys()) - set([c['Id'] for c in self.stats['containers']]))
        for container_id in nonexisting_containers:
            # Stop the thread
            logger.debug("{0} plugin - Stop thread for old container {1}".format(self.plugin_name, container_id[:12]))
            self.thread_list[container_id].stop()
            # Delete the item from the dict
            del(self.thread_list[container_id])
        # Get stats for all containers
        for container in self.stats['containers']:
            # The key is the container name and not the Id
            container['key'] = self.get_key()
            # Export name (first name in the list, without the /)
            container['name'] = container['Names'][0][1:]
            # Per-resource stats come from the grabber thread's snapshot
            container['cpu'] = self.get_docker_cpu(container['Id'], self.thread_list[container['Id']].stats)
            container['memory'] = self.get_docker_memory(container['Id'], self.thread_list[container['Id']].stats)
            container['network'] = self.get_docker_network(container['Id'], self.thread_list[container['Id']].stats)
            container['io'] = self.get_docker_io(container['Id'], self.thread_list[container['Id']].stats)
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # Not available
        pass
    return self.stats
def login(self):
    """Logon to the server.

    Try the Glances protocol first (unless --snmp-force); on connection
    failure fall back to SNMP, or exit via log_and_exit depending on
    return_to_browser. Return True when a stats object was initialised.
    """
    ret = True
    if not self.args.snmp_force:
        # First of all, trying to connect to a Glances server
        self.set_mode('glances')
        client_version = None
        try:
            client_version = self.client.init()
        except socket.error as err:
            # Fallback to SNMP
            logger.error("Connection to Glances server failed (%s)" % err)
            self.set_mode('snmp')
            fallbackmsg = _("Trying fallback to SNMP...")
            if not self.return_to_browser:
                print(fallbackmsg)
            else:
                logger.info(fallbackmsg)
        except ProtocolError as err:
            # Others errors
            if str(err).find(" 401 ") > 0:
                msg = "Connection to server failed (bad password)"
            else:
                msg = "Connection to server failed ({0})".format(err)
            self.log_and_exit(msg)
            return False
        # NOTE(review): when the client init succeeded, client_version is the
        # server's version string; only the major number must match ours.
        if self.get_mode() == 'glances' and version.split('.')[0] == client_version.split('.')[0]:
            # Init stats
            self.stats = GlancesStatsClient()
            self.stats.set_plugins(json.loads(self.client.getAllPlugins()))
            logger.debug("Client version: %s / Server version: %s" % (version, client_version))
        elif self.get_mode() == 'glances':
            self.log_and_exit("Client and server not compatible: Client version: %s / Server version: %s" % (version, client_version))
            return False
        else:
            self.set_mode('snmp')
    if self.get_mode() == 'snmp':
        logger.info("Trying to grab stats by SNMP...")
        # Fallback to SNMP if needed
        from glances.core.glances_stats import GlancesStatsClientSNMP
        # Init stats
        self.stats = GlancesStatsClientSNMP(args=self.args)
        if not self.stats.check_snmp():
            self.log_and_exit("Connection to SNMP server failed")
            return False
    if ret:
        # Load limits from the configuration file
        # Each client can choose its owns limits
        self.stats.load_limits(self.config)
        # Init screen
        self.screen = GlancesCursesClient(args=self.args)
    # Return result
    return ret
def update(self):
    """Update the processes stats.

    Rebuilds self.processlist and self.processcount from psutil,
    honouring the kernel-thread filter, the process filter and the
    optional process tree / max-processes display limits.
    """
    # Reset the stats
    self.processlist = []
    self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Get the time since last update (used to normalise I/O rates)
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Build an internal dict with only mandatories stats (sort keys)
    processdict = {}
    for proc in psutil.process_iter():
        # Ignore kernel threads if needed
        if (self.no_kernel_threads and (not is_windows) and is_kernel_thread(proc)):
            continue

        # If self.get_max_processes() is None: Only retreive mandatory stats
        # Else: retreive mandatory and standard stats
        s = self.__get_process_stats(proc,
                                     mandatory_stats=True,
                                     standard_stats=self.get_max_processes() is None)
        # Continue to the next process if it has to be filtered
        if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
            continue
        # Ok add the process to the list
        processdict[proc] = s
        # ignore the 'idle' process on Windows and *BSD
        # ignore the 'kernel_task' process on OS X
        # waiting for upstream patch from psutil
        # NOTE(review): this `continue` runs AFTER the insert above, so the
        # skipped process stays in processdict — confirm that is intended.
        if (is_bsd and processdict[proc]['name'] == 'idle' or
                is_windows and processdict[proc]['name'] == 'System Idle Process' or
                is_mac and processdict[proc]['name'] == 'kernel_task'):
            continue
        # Update processcount (global statistics)
        try:
            self.processcount[str(proc.status())] += 1
        except KeyError:
            # Key did not exist, create it
            try:
                self.processcount[str(proc.status())] = 1
            except psutil.NoSuchProcess:
                pass
        except psutil.NoSuchProcess:
            pass
        else:
            self.processcount['total'] += 1
        # Update thread number (global statistics)
        try:
            self.processcount['thread'] += proc.num_threads()
        except Exception:
            pass

    if self._enable_tree:
        # Tree mode: build the process tree then enrich the visible nodes
        self.process_tree = ProcessTreeNode.build_tree(processdict, self.getsortkey(), self.no_kernel_threads)

        for i, node in enumerate(self.process_tree):
            # Only retreive stats for visible processes (get_max_processes)
            if (self.get_max_processes() is not None) and (i >= self.get_max_processes()):
                break

            # add standard stats
            new_stats = self.__get_process_stats(node.process,
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=False)
            if new_stats is not None:
                node.stats.update(new_stats)

            # Add a specific time_since_update stats for bitrate
            node.stats['time_since_update'] = time_since_update

    else:
        # Process optimization
        # Only retreive stats for visible processes (get_max_processes)
        if self.get_max_processes() is not None:
            # Sort the internal dict and cut the top N (Return a list of tuple)
            # tuple=key (proc), dict (returned by __get_process_stats)
            try:
                processiter = sorted(processdict.items(),
                                     key=lambda x: x[1][self.getsortkey()],
                                     reverse=True)
            except (KeyError, TypeError) as e:
                logger.error("Cannot sort process list by %s (%s)" % (self.getsortkey(), e))
                # NOTE(review): dict.items() is a view and is not indexable on
                # Python 3 — this debug line would itself raise there.
                logger.error("%s" % str(processdict.items()[0]))
                # Fallback to all process (issue #423)
                processloop = processdict.items()
                first = False
            else:
                processloop = processiter[0:self.get_max_processes()]
                first = True
        else:
            # Get all processes stats
            processloop = processdict.items()
            first = False

        for i in processloop:
            # Already existing mandatory stats
            procstat = i[1]
            if self.get_max_processes() is not None:
                # Update with standard stats
                # and extended stats but only for TOP (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
            # Add a specific time_since_update stats for bitrate
            procstat['time_since_update'] = time_since_update

            # Update process list
            self.processlist.append(procstat)

            # Next... (only the first process gets extended stats)
            first = False

    # Clean internals caches if timeout is reached
    if self.cache_timer.finished():
        self.username_cache = {}
        self.cmdline_cache = {}
        # Restart the timer
        self.cache_timer.reset()
def __init__(self, args=None):
    """Initialize the curses output: screen, colors and key handling.

    Exits with status 1 when the curses screen cannot be created.
    """
    self.args = args

    # Default terminal geometry
    self.term_w = 80
    self.term_h = 24

    # Spacing between stats columns and lines
    self.space_between_column = 3
    self.space_between_line = 2

    # Start the curses screen
    self.screen = curses.initscr()
    if not self.screen:
        logger.critical("Cannot init the curses library.\n")
        sys.exit(1)

    # Apply the usual terminal options when the curses build provides them
    for option in ('start_color', 'use_default_colors', 'noecho', 'cbreak'):
        if hasattr(curses, option):
            getattr(curses, option)()
    self.set_cursor(0)

    # Init colors
    self.hascolors = False
    if curses.has_colors() and curses.COLOR_PAIRS > 8:
        self.hascolors = True
        # FG color, BG color — pair 1 depends on the white/black theme
        theme_fg = curses.COLOR_BLACK if args.theme_white else curses.COLOR_WHITE
        curses.init_pair(1, theme_fg, -1)
        for pair, fg, bg in ((2, curses.COLOR_WHITE, curses.COLOR_RED),
                             (3, curses.COLOR_WHITE, curses.COLOR_GREEN),
                             (4, curses.COLOR_WHITE, curses.COLOR_BLUE),
                             (5, curses.COLOR_WHITE, curses.COLOR_MAGENTA),
                             (6, curses.COLOR_RED, -1),
                             (7, curses.COLOR_GREEN, -1),
                             (8, curses.COLOR_BLUE, -1)):
            curses.init_pair(pair, fg, bg)
        # Pairs 9/10 may be unsupported: fall back to the theme foreground
        for pair, fg in ((9, curses.COLOR_MAGENTA), (10, curses.COLOR_CYAN)):
            try:
                curses.init_pair(pair, fg, -1)
            except Exception:
                curses.init_pair(pair, theme_fg, -1)
    else:
        self.hascolors = False

    # NOTE(review): bold is turned ON when args.disable_bold is truthy.
    # This looks inverted, but would be correct if the CLI flag is declared
    # with action='store_false' — confirm against the argument parser.
    bold = curses.A_BOLD if args.disable_bold else 0

    self.title_color = bold
    self.title_underline_color = bold | curses.A_UNDERLINE
    self.help_color = bold

    if self.hascolors:
        # Colors text styles
        self.no_color = curses.color_pair(1)
        self.default_color = curses.color_pair(3) | bold
        self.nice_color = curses.color_pair(9) | bold
        self.ifCAREFUL_color = curses.color_pair(4) | bold
        self.ifWARNING_color = curses.color_pair(5) | bold
        self.ifCRITICAL_color = curses.color_pair(2) | bold
        self.default_color2 = curses.color_pair(7) | bold
        self.ifCAREFUL_color2 = curses.color_pair(8) | bold
        self.ifWARNING_color2 = curses.color_pair(9) | bold
        self.ifCRITICAL_color2 = curses.color_pair(6) | bold
        self.filter_color = curses.color_pair(10) | bold
    else:
        # B&W text styles
        self.no_color = curses.A_NORMAL
        self.default_color = curses.A_NORMAL
        self.nice_color = bold
        self.ifCAREFUL_color = curses.A_UNDERLINE
        self.ifWARNING_color = bold
        self.ifCRITICAL_color = curses.A_REVERSE
        self.default_color2 = curses.A_NORMAL
        self.ifCAREFUL_color2 = curses.A_UNDERLINE
        self.ifWARNING_color2 = bold
        self.ifCRITICAL_color2 = curses.A_REVERSE
        self.filter_color = bold

    # Define the colors list (hash table) for stats
    self.colors_list = {
        'DEFAULT': self.no_color,
        'UNDERLINE': curses.A_UNDERLINE,
        'BOLD': bold,
        'SORT': bold,
        'OK': self.default_color2,
        'FILTER': self.filter_color,
        'TITLE': self.title_color,
        'PROCESS': self.default_color2,
        'STATUS': self.default_color2,
        'NICE': self.nice_color,
        'CAREFUL': self.ifCAREFUL_color2,
        'WARNING': self.ifWARNING_color2,
        'CRITICAL': self.ifCRITICAL_color2,
        'OK_LOG': self.default_color,
        'CAREFUL_LOG': self.ifCAREFUL_color,
        'WARNING_LOG': self.ifWARNING_color,
        'CRITICAL_LOG': self.ifCRITICAL_color
    }

    # Main window spans the whole screen
    self.term_window = self.screen.subwin(0, 0)

    # Refresh interval (seconds, from the CLI)
    self.__refresh_time = args.time

    # Process sort method starts in automatic mode
    self.args.process_sorted_by = 'auto'

    # Edit-filter mode flag
    self.edit_filter = False

    # Catch key pressed with non blocking mode
    self.term_window.keypad(1)
    self.term_window.nodelay(1)
    self.pressedkey = -1

    # History tags
    self.reset_history_tag = False
    self.history_tag = False
    if args.enable_history:
        logger.info('Stats history enabled with output path %s' % args.path_history)
        from glances.exports.glances_history import GlancesHistory
        self.glances_history = GlancesHistory(args.path_history)
        if not self.glances_history.graph_enabled():
            # History needs MatPlotLib: disable it when unavailable
            args.enable_history = False
            logger.error('Stats history disabled because MatPlotLib is not installed')
def read(self):
    """Read the config file, if it exists. Using defaults otherwise.

    The first existing file from config_file_paths() is loaded; then any
    section missing from the parser is created and filled with the
    built-in default thresholds (defaults are only applied when the whole
    section is absent).
    """
    for config_file in self.config_file_paths():
        if os.path.exists(config_file):
            try:
                if is_py3:
                    self.parser.read(config_file, encoding='utf-8')
                else:
                    self.parser.read(config_file)
                logger.info("Read configuration file '{0}'".format(config_file))
            except UnicodeDecodeError as err:
                logger.error("Cannot decode configuration file '{0}': {1}".format(config_file, err))
                sys.exit(1)
            # Save the loaded configuration file path (issue #374)
            self._loaded_config_file = config_file
            break

    def levels(prefix, careful='50', warning='70', critical='90'):
        # Build the (option, value) pairs for one careful/warning/critical set
        return [(prefix + 'careful', careful),
                (prefix + 'warning', warning),
                (prefix + 'critical', critical)]

    # Built-in default thresholds, per section
    defaults = [
        ('quicklook', levels('cpu_') + levels('mem_') + levels('swap_')),
        ('cpu', levels('user_') + levels('iowait_') + levels('system_') + levels('steal_')),
        ('percpu', levels('user_') + levels('iowait_') + levels('system_')),
        ('load', levels('', '0.7', '1.0', '5.0')),
        ('mem', levels('')),
        ('memswap', levels('')),
        ('fs', levels('')),
        ('sensors', levels('temperature_core_', '60', '70', '80') +
                    levels('temperature_hdd_', '45', '52', '60') +
                    levels('battery_', '80', '90', '95')),
        ('processlist', levels('cpu_') + levels('mem_')),
    ]
    for section, options in defaults:
        if not self.parser.has_section(section):
            self.parser.add_section(section)
            for option, value in options:
                self.parser.set(section, option, value)