def __update__(self):
    """Update the stats."""
    # Reset the list
    self.reset()

    if self.initok:
        for chip in sensors.iter_detected_chips():
            for feature in chip:
                current = {}
                # Keep only temperature and fan speed features
                if feature.name.startswith(b'temp'):
                    current['unit'] = SENSOR_TEMP_UNIT
                elif feature.name.startswith(b'fan'):
                    current['unit'] = SENSOR_FAN_UNIT
                if not current:
                    continue
                try:
                    current['label'] = feature.label
                    current['value'] = int(feature.get_value())
                except SensorsError as e:
                    logger.debug("Cannot grab sensor stat(%s)" % e)
                else:
                    self.sensors_list.append(current)

    return self.sensors_list
def load_limits(self, config):
    """Load limits from the configuration file, if it exists."""
    # Default history length: 3 points per second during one day
    self._limits['history_size'] = 28800

    # A config object without 'has_section' is not usable
    if not hasattr(config, 'has_section'):
        return False

    # The global section may override the default history size
    if config.has_section('global'):
        self._limits['history_size'] = config.get_float_value(
            'global', 'history_size', default=28800)
        logger.debug("Load configuration key: {0} = {1}".format(
            'history_size', self._limits['history_size']))

    # Plugin specific limits
    if config.has_section(self.plugin_name):
        for level, _ in config.items(self.plugin_name):
            limit_key = '_'.join([self.plugin_name, level])
            try:
                self._limits[limit_key] = config.get_float_value(
                    self.plugin_name, level)
            except ValueError:
                # Not a float: store the comma separated list instead
                self._limits[limit_key] = config.get_value(
                    self.plugin_name, level).split(",")
            logger.debug("Load limit: {} = {}".format(
                limit_key, self._limits[limit_key]))

    return True
def load_exports(self, args=None):
    """Load all export modules in the 'exports' folder.

    :param args: parsed command line arguments (argparse namespace)
    :return: True if the modules have been loaded, False if args is None
    """
    if args is None:
        return False
    header = "glances_"
    # Transform the arguments list into a dict
    # The aim is to check if the export module should be loaded
    # (original used the redundant vars(locals()['args']) indirection)
    args_var = vars(args)
    for item in os.listdir(exports_path):
        export_name = os.path.basename(item)[len(header):-3].lower()
        if (item.startswith(header) and
                item.endswith(".py") and
                item != (header + "export.py") and
                item != (header + "history.py") and
                args_var['export_' + export_name] is not None and
                args_var['export_' + export_name] is not False):
            # Import the export module
            export_module = __import__(os.path.basename(item)[:-3])
            # Add the export to the dictionary
            # The key is the module name without the glances_ prefix
            # for example, the file glances_xxx.py
            # generate self._exports_list["xxx"] = ...
            self._exports[export_name] = export_module.Export(
                args=args, config=self.config)
    # Log plugins list
    logger.debug("Available exports modules list: {}".format(
        self.getExportList()))
    return True
def filter(self, value):
    """Set the filter (as a string) and compute the regular expression

    A filter could be one of the following:
    - python > Process name of cmd start with python
    - .*python.* > Process name of cmd contain python
    - username:nicolargo > Process of nicolargo user
    """
    # Keep the raw user input
    self._filter_input = value
    if value is None:
        # No filter
        self._filter = None
        self._filter_key = None
    else:
        # An optional "key:" prefix selects the process field to match
        new_filter = value.split(':')
        if len(new_filter) == 1:
            # No key given: filter applies to the default field
            self._filter = new_filter[0]
            self._filter_key = None
        else:
            self._filter = new_filter[1]
            self._filter_key = new_filter[0]
    # Invalidate any previously compiled regex before recomputing it
    self._filter_re = None
    if self.filter is not None:
        logger.info("Set filter to {} on key {}".format(
            self.filter, self.filter_key))
        # Compute the regular expression
        try:
            self._filter_re = re.compile(self.filter)
            logger.debug("Filter regex compilation OK: {}".format(
                self.filter))
        except Exception as e:
            # Invalid pattern: disable the filter completely
            logger.error("Cannot compile filter regex: {} ({})".format(
                self.filter, e))
            self._filter = None
            self._filter_re = None
            self._filter_key = None
def run(self, stat_name, criticity, commands, mustache_dict=None):
    """Run the commands (in background).

    - stat_name: plugin_name (+ header)
    - criticity: criticity of the trigger
    - commands: a list of command line with optional {{mustache}}
    - mustache_dict: Plugin stats (can be used within {{mustache}})

    Return True if the commands have been run.
    """
    # Nothing to do when the criticity did not change
    # or when the anti-replay timer is still running
    if self.get(stat_name) == criticity or not self.start_timer.finished():
        # Action already executed => Exit
        return False

    logger.debug("Run action {} for {} ({}) with stats {}".format(
        commands, stat_name, criticity, mustache_dict))

    # Run all actions in background
    for cmd in commands:
        # Replace {{arg}} by the dict one (Thk to {Mustache})
        cmd_full = pystache.render(cmd, mustache_dict) if pystache_tag else cmd
        # Execute the action
        logger.info("Action triggered for {} ({}): {}".format(
            stat_name, criticity, cmd_full))
        logger.debug("Stats value for the trigger: {}".format(mustache_dict))
        try:
            Popen(cmd_full, shell=True)
        except OSError as e:
            logger.error("Can't execute the action ({})".format(e))

    # Remember the criticity for this trigger
    self.set(stat_name, criticity)

    return True
def reset_stats_history(self):
    """Reset the stats history (dict of GlancesAttribute)."""
    if not self._history_enable():
        return
    items = [a['name'] for a in self.get_items_history_list()]
    logger.debug("Reset history for plugin {0} (items: {1})".format(
        self.plugin_name, items))
    self.stats_history.reset()
def update(self): """Update the command result attributed.""" # Search application monitored processes by a regular expression processlist = gl_processes.getalllist() # Iter upon the AMPs dict for k, v in iteritems(self.get()): try: amps_list = [p for p in processlist for c in p['cmdline'] if re.search(v.regex(), c) is not None] except TypeError: continue if len(amps_list) > 0: # At least one process is matching the regex logger.debug("AMPS: {} process detected (PID={})".format(k, amps_list[0]['pid'])) # Call the AMP update method thread = threading.Thread(target=v.update_wrapper, args=[amps_list]) thread.start() else: # Set the process number to 0 v.set_count(0) if v.count_min() is not None and v.count_min() > 0: # Only display the "No running process message" is countmin is defined v.set_result("No running process") return self.__amps_dict
def wrapper(*args, **kw):
    # Delegate to the wrapped function, then trace the call and its result
    result = fct(*args, **kw)
    logger.debug("%s %s %s return %s" % (
        args[0].__class__.__name__,
        args[0].__class__.__module__[len('glances_'):],
        fct.__name__,
        result))
    return result
def load_conf(self, section="opentsdb"):
    """Load the OpenTSDB configuration in the Glances configuration file."""
    if self.config is None:
        return False
    # Mandatory keys
    try:
        self.host = self.config.get_value(section, 'host')
        self.port = self.config.get_value(section, 'port')
    except NoSectionError:
        logger.critical("No OpenTSDB configuration found")
        return False
    except NoOptionError as e:
        logger.critical("Error in the OpenTSDB configuration (%s)" % e)
        return False
    logger.debug("Load OpenTSDB from the Glances configuration file")
    # Prefix is optional
    try:
        self.prefix = self.config.get_value(section, 'prefix')
    except NoOptionError:
        pass
    # Tags are optional, comma separated key:value pairs.
    try:
        self.tags = self.config.get_value(section, 'tags')
    except NoOptionError:
        pass
    return True
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    logger.debug("Export {} stats to InfluxDB".format(name))
    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name
    # Create DB input
    if self.version == INFLUXDB_08:
        data = [{'name': name, 'columns': columns, 'points': [points]}]
    else:
        # Convert all int to float (mandatory for InfluxDB>0.9.2)
        # Correct issue#750 and issue#749
        # NOTE: points is converted in place
        for idx, _ in enumerate(points):
            try:
                points[idx] = float(points[idx])
            except (TypeError, ValueError) as e:
                logger.debug(
                    "InfluxDB error during stat convertion %s=%s (%s)" % (columns[idx], points[idx], e))
        data = [{'measurement': name,
                 'tags': self.parse_tags(self.tags),
                 'fields': dict(zip(columns, points))}]
    # Write input to the InfluxDB database
    try:
        self.client.write_points(data)
    except Exception as e:
        logger.error("Cannot export {} stats to InfluxDB ({})".format(
            name, e))
def export(self, name, columns, points):
    """Write the points to the ES server."""
    logger.debug("Export {} stats to ElasticSearch".format(name))

    # Build one bulk action per (column, point) couple
    # https://elasticsearch-py.readthedocs.io/en/master/helpers.html
    actions = [{"_index": self.index,
                "_type": name,
                "_id": col,
                "_source": {"value": str(point),
                            "timestamp": datetime.now()}}
               for col, point in zip(columns, points)]

    # Write input to the ES index
    try:
        helpers.bulk(self.client, actions)
    except Exception as e:
        logger.error("Cannot export {} stats to ElasticSearch ({})".format(
            name, e))
def load_conf(self, section="cassandra"):
    """Load the Cassandra configuration in the Glances configuration file."""
    if self.config is None:
        return False
    # Mandatory keys
    try:
        self.host = self.config.get_value(section, 'host')
        self.port = self.config.get_value(section, 'port')
        self.keyspace = self.config.get_value(section, 'keyspace')
    except NoSectionError:
        logger.critical("No Cassandra configuration found")
        return False
    except NoOptionError as e:
        logger.critical("Error in the Cassandra configuration (%s)" % e)
        return False
    logger.debug("Load Cassandra from the Glances configuration file")
    # Optionals keys
    for attr in ('protocol_version', 'replication_factor'):
        try:
            setattr(self, attr, self.config.get_value(section, attr))
        except NoOptionError:
            pass
    try:
        self.table = self.config.get_value(section, 'table')
    except NoOptionError:
        # Fall back to the host name
        self.table = self.host
    return True
def __set_folder_list(self, section):
    """Init the monitored folder list.

    The list is defined in the Glances configuration file.

    :param section: name of the configuration file section to read
    """
    for index in range(1, self.__folder_list_max_size + 1):
        value = {}
        key = 'folder_' + str(index) + '_'

        # Path is mandatory
        try:
            value['path'] = self.config.get_value(section, key + 'path')
        except Exception as e:
            logger.error("Cannot read folder list: {}".format(e))
            continue
        if value['path'] is None:
            continue

        # Optional conf keys
        # (was a bare except: — narrowed to Exception so that
        #  KeyboardInterrupt/SystemExit are no longer swallowed)
        for i in ['careful', 'warning', 'critical']:
            try:
                value[i] = self.config.get_value(section, key + i)
            except Exception:
                value[i] = None
                logger.debug("No {} threshold for folder {}".format(
                    i, value["path"]))

        # Add the item to the list
        self.__folder_list.append(value)
def _port_scan_tcp(self, port):
    """Scan the (TCP) port structure (dict) and update the status key.

    :param port: dict with at least 'host', 'port' and 'timeout' keys
    :return: connect_ex() result (0 on success) or None on error
    """
    ret = None

    # Create and configure the scanning socket
    try:
        socket.setdefaulttimeout(port['timeout'])
        _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except Exception as e:
        logger.debug("{0}: Error while creating scanning socket ({1})".format(
            self.plugin_name, e))
        # BUG FIX: without this early return the code below raised a
        # NameError on the unbound _socket (in connect_ex and in finally)
        return ret

    # Scan port
    ip = self._resolv_name(port['host'])
    counter = Counter()
    try:
        ret = _socket.connect_ex((ip, int(port['port'])))
    except Exception as e:
        logger.debug("{0}: Error while scanning port {1} ({2})".format(
            self.plugin_name, port, e))
    else:
        if ret == 0:
            # Port is open: status holds the counter value
            # (presumably the elapsed scan time — TODO confirm)
            port['status'] = counter.get()
        else:
            port['status'] = False
    finally:
        _socket.close()

    return ret
def _port_scan_icmp(self, port):
    """Scan the (ICMP) port structure (dict) and update the status key.

    :param port: dict with at least a 'host' key
    :return: ping exit code (0 on success) or None on error
    """
    ret = None

    # Create the ping command
    # Use the system ping command because it already have the sticky bit set
    # Python can not create ICMP packet with non root right
    # BUG FIX: the original list was ['ping', '-n' '-c', ...] (missing
    # comma) which passed the single bogus argument '-n-c' to ping
    cmd = ['ping', '-n', '-c', '1', self._resolv_name(port['host'])]

    # BUG FIX: the devnull handle was never closed (resource leak)
    with open(os.devnull, 'w') as fnull:
        try:
            counter = Counter()
            # NOTE: check_call raises CalledProcessError on a non-zero exit,
            # so the else branch below is only a defensive fallback
            ret = subprocess.check_call(cmd, stdout=fnull, stderr=fnull,
                                        close_fds=True)
            if ret == 0:
                port['status'] = counter.get()
            else:
                port['status'] = False
        except Exception as e:
            # BUG FIX: the original format string skipped {1}, so the host
            # passed as second argument was never printed
            logger.debug("{0}: Error while pinging host {1} ({2})".format(
                self.plugin_name, port['host'], e))

    return ret
def init_stats_history(self):
    """Init the stats history (dict of GlancesAttribute)."""
    if self._history_enable():
        tracked = [item['name'] for item in self.get_items_history_list()]
        logger.debug(
            "Stats history activated for plugin {0} (items: {1})".format(
                self.plugin_name, tracked))
    # A fresh history object is returned even when history is disabled
    return GlancesHistory()
def get_docker_cpu(self, container_id, all_stats):
    """Return the container CPU usage.

    Input: id is the full container id
    all_stats is the output of the stats method of the Docker API
    Output: a dict {'total': 1.49}
    """
    cpu_new = {}
    # Default to 0.0 when the usage cannot be computed (first call,
    # missing stats, or zero delta)
    ret = {'total': 0.0}

    # Read the stats
    # For each container, you will find a pseudo-file cpuacct.stat,
    # containing the CPU usage accumulated by the processes of the container.
    # Those times are expressed in ticks of 1/USER_HZ of a second.
    # On x86 systems, USER_HZ is 100.
    try:
        cpu_new['total'] = all_stats['cpu_stats']['cpu_usage'][
            'total_usage']
        cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
        # percpu_usage may be None: `or []` keeps len() safe
        cpu_new['nb_core'] = len(
            all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
    except KeyError as e:
        # all_stats do not have CPU information
        logger.debug(
            "Can not grab CPU usage for container {0} ({1})".format(
                container_id, e))
        logger.debug(all_stats)
    else:
        # Previous CPU stats stored in the cpu_old variable
        if not hasattr(self, 'cpu_old'):
            # First call, we init the cpu_old variable
            self.cpu_old = {}
            # NOTE(review): a plain dict assignment should not raise
            # IOError/UnboundLocalError — this guard looks historical
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.cpu_old:
            # First sample for this container: no delta can be computed yet
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass
        else:
            # Container already seen: compute usage from the deltas
            cpu_delta = float(cpu_new['total'] -
                              self.cpu_old[container_id]['total'])
            system_delta = float(cpu_new['system'] -
                                 self.cpu_old[container_id]['system'])
            if cpu_delta > 0.0 and system_delta > 0.0:
                ret['total'] = (cpu_delta / system_delta) * float(
                    cpu_new['nb_core']) * 100

            # Save stats to compute next stats
            self.cpu_old[container_id] = cpu_new

    # Return the stats
    return ret
def _resolv_name(self, hostname):
    """Convert hostname to IP address (fall back to the hostname itself)."""
    try:
        return socket.gethostbyname(hostname)
    except Exception as e:
        logger.debug("{0}: Can not convert {1} to IP address ({2})".format(
            self.plugin_name, hostname, e))
        return hostname
def load_config(self, config):
    '''Load the outputs section of the configuration file'''
    # Nothing to do without an [outputs] section
    if config is None or not config.has_section('outputs'):
        return
    logger.debug('Read the outputs section in the configuration file')
    self.theme['name'] = config.get_value('outputs', 'curse_theme',
                                          default='black')
    logger.debug('Theme for the curse interface: {}'.format(
        self.theme['name']))
def __init__(self):
    """Init batteries stats."""
    try:
        self.bat = batinfo.batteries()
        self.initok = True
        self.bat_list = []
        self.update()
    except Exception as e:
        # Any failure (missing lib, no battery...) disables the grabber
        self.initok = False
        logger.debug("Cannot init GlancesGrabBat class (%s)" % e)
def __init__(self, stats):
    """Init the class"""
    logger.debug(
        "ports plugin - Create thread for scan list {}".format(stats))
    super(ThreadScanner, self).__init__()
    # Set when the thread should stop
    self._stopper = threading.Event()
    # Scan results, exposed as a list of dict
    self._stats = stats
    # This thread belongs to the Ports plugin
    self.plugin_name = "ports"
def export(self, name, columns, points):
    """Export the stats to the Statsd server."""
    for i, column in enumerate(columns):
        value = points[i]
        # Only numerical stats can be sent as gauges
        if not isinstance(value, Number):
            continue
        stat_name = '{}.{}'.format(name, column)
        try:
            self.client.gauge(stat_name, value)
        except Exception as e:
            logger.error("Can not export stats to Statsd (%s)" % e)
    logger.debug("Export {} stats to Statsd".format(name))
def get_export(self):
    """Overwrite the default export method.

    - Only exports containers
    - The key is the first container name
    """
    try:
        return self.stats['containers']
    except KeyError as e:
        logger.debug("Docker export error {}".format(e))
        return []
def __init__(self, config=None, args=None):
    """Init the export class."""
    # Export name (= module name without the glances_ prefix)
    self.export_name = self.__class__.__module__[len('glances_'):]
    logger.debug("Init export interface %s" % self.export_name)

    # Store the config & args instances
    self.config = config
    self.args = args

    # Export is disabled by default;
    # child classes must set it to True in their own __init__
    self.export_enable = False
def __init__(self, config=None, args=None):
    """Init the stats aggregator with optional config and args instances."""
    logger.debug("[GlancesStats.__init__]")
    # Store the configuration and the argument instances
    self.config = config
    self.args = args
    # Load plugins and exports modules
    self.load_modules(self.args)
    # Load the limits (for plugins)
    self.load_limits(config)
def export(self, input_stats=None):
    """Export all the stats.

    Each export module is run in a dedicated (non-joined) thread.
    """
    input_stats = input_stats or {}
    for module_name in self._exports:
        logger.debug("Export stats using the %s module" % module_name)
        threading.Thread(target=self._exports[module_name].update,
                         args=(input_stats, )).start()
def get_key(self, window):
    # Catch ESC key AND numlock key (issue #163)
    # Two reads: a leading 27 (ESC) may be followed by a second code
    keycode = [window.getch(), window.getch()]
    if keycode != [-1, -1]:
        logger.debug("Keypressed (code: %s)" % keycode)
    if keycode[0] == 27 and keycode[1] != -1:
        # Do not escape on specials keys
        return -1
    return keycode[0]
def export(self, name, columns, points):
    """Export the stats to the OpenTSDB server."""
    for i, column in enumerate(columns):
        value = points[i]
        # Only numerical stats can be sent
        if not isinstance(value, Number):
            continue
        stat_name = '{}.{}.{}'.format(self.prefix, name, column)
        tags = self.parse_tags(self.tags)
        try:
            self.client.send(stat_name, value, **tags)
        except Exception as e:
            logger.error("Can not export stats %s to OpenTSDB (%s)" % (name, e))
    logger.debug("Export {} stats to OpenTSDB".format(name))
def __init__(self, docker_client, container_id):
    """Init the class:
    docker_client: instance of Docker-py client
    container_id: Id of the container"""
    logger.debug("docker plugin - Create thread for container {}".format(
        container_id[:12]))
    super(ThreadDockerGrabber, self).__init__()
    # Set when the thread should stop
    self._stopper = threading.Event()
    # docker-py returns the stats as a stream
    self._container_id = container_id
    self._stats_stream = docker_client.stats(container_id, decode=True)
    # Latest stats, exposed as a dict
    self._stats = {}
def connect(self, version=None):
    """Connect to the Docker server.

    :param version: optional Docker API version string to use
    :return: a docker.Client instance or None if the connection failed
    """
    # Init connection to the Docker API
    try:
        if version is None:
            ret = docker.Client(base_url='unix://var/run/docker.sock')
        else:
            ret = docker.Client(base_url='unix://var/run/docker.sock',
                                version=version)
    except NameError:
        # docker lib not found
        return None

    try:
        ret.version()
    except requests.exceptions.ConnectionError as e:
        # Connexion error (Docker not detected)
        # Let this message in debug mode
        logger.debug("Can't connect to the Docker server (%s)" % e)
        return None
    except docker.errors.APIError as e:
        if version is None:
            # API error (Version mismatch ?)
            logger.debug("Docker API error (%s)" % e)
            # Try the connection with the server version, parsed from the
            # error message.
            # BUG FIX: the pattern is now a raw string — the plain literal
            # contained invalid escape sequences (\: \  \)) flagged as W605
            version = re.search(
                r'(?:server API version|server)\:\ (.*)\)\".*\)', str(e))
            if version:
                logger.debug("Try connection with Docker API version %s"
                             % version.group(1))
                ret = self.connect(version=version.group(1))
            else:
                logger.debug("Can not retreive Docker server version")
                ret = None
        else:
            # API error
            logger.error("Docker API error (%s)" % e)
            ret = None
    except Exception as e:
        # Others exceptions...
        # Connexion error (Docker not detected)
        logger.error("Can't connect to the Docker server (%s)" % e)
        ret = None

    # Log an info if Docker plugin is disabled
    if ret is None:
        logger.debug(
            "Docker plugin is disable because an error has been detected")

    return ret