def __set_monitor_list(self, section, key):
    """Init the monitored processes list.

    The list is defined in the configuration file.
    """
    for l in range(1, self.__monitor_list_max_size + 1):
        value = {}
        key = "list_" + str(l) + "_"
        try:
            description = self.config.get_value(section, key + 'description')
            regex = self.config.get_value(section, key + 'regex')
            command = self.config.get_value(section, key + 'command')
            countmin = self.config.get_value(section, key + 'countmin')
            countmax = self.config.get_value(section, key + 'countmax')
        except Exception as e:
            logger.error("Cannot read monitored list: {0}".format(e))
        else:
            if description is not None and regex is not None:
                # Build the new item
                value["description"] = description
                try:
                    re.compile(regex)
                except Exception:
                    continue
                else:
                    value["regex"] = regex
                value["command"] = command
                value["countmin"] = countmin
                value["countmax"] = countmax
                value["count"] = None
                value["result"] = None
                # Add the item to the list
                self.__monitor_list.append(value)
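# A minimal configuration sketch for the "list_<N>_<field>" keys read above
# (the section name and values below are illustrative examples, not a
# recommendation):
#
#     [monitor]
#     list_1_description=Dropbox
#     list_1_regex=.*dropbox.*
#     list_1_countmin=1
#     list_1_countmax=1
#     list_2_description=Python programs
#     list_2_regex=.*python.*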
def update(self): """Update the stats using SNMP.""" # For each plugins, call the update method for p in self._plugins: # Set the input method to SNMP self._plugins[p].input_method = 'snmp' self._plugins[p].short_system_name = self.system_name try: self._plugins[p].update() except Exception as e: logger.error("Update {0} failed: {1}".format(p, e))
def process_filter(self, value):
    """Set the process filter."""
    logger.info("Set process filter to {0}".format(value))
    self._process_filter = value
    if value is not None:
        try:
            self._process_filter_re = re.compile(value)
            logger.debug("Process filter regex compilation OK: {0}".format(
                self.process_filter))
        except Exception:
            logger.error(
                "Cannot compile process filter regex: {0}".format(value))
            self._process_filter_re = None
    else:
        self._process_filter_re = None
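# A quick sketch of the filter semantics (hypothetical values): the compiled
# regex is later matched against process names and command lines, e.g.
#
#     >>> import re
#     >>> process_filter_re = re.compile('.*python.*')
#     >>> bool(process_filter_re.match('/usr/bin/python glances'))
#     True
#     >>> bool(process_filter_re.match('sshd: root@pts/0'))
#     False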
def get_stats_value(self, item, value):
    """Return the stats object for a specific item=value in JSON format.

    Stats should be a list of dicts (processlist, network...).
    """
    if not isinstance(self.stats, list):
        return None
    else:
        if value.isdigit():
            value = int(value)
        try:
            return json.dumps({value: [i for i in self.stats if i[item] == value]})
        except (KeyError, ValueError) as e:
            logger.error(
                "Cannot get item({0})=value({1}) ({2})".format(item, value, e))
            return None
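# Expected output shape for get_stats_value (hypothetical network-like
# stats; JSON key order may vary):
#
#     >>> import json
#     >>> stats = [{'interface_name': 'eth0', 'rx': 100},
#     ...          {'interface_name': 'lo', 'rx': 0}]
#     >>> json.dumps({'eth0': [i for i in stats if i['interface_name'] == 'eth0']})
#     # -> '{"eth0": [{"interface_name": "eth0", "rx": 100}]}'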
def update(self): """Update sensors stats using the input method.""" # Reset the stats self.reset() if self.input_method == 'local': # Update stats using the dedicated lib self.stats = [] # Get the temperature try: temperature = self.__set_type( self.glancesgrabsensors.get('temperature_core'), 'temperature_core') except Exception as e: logger.error("Cannot grab sensors temperatures (%s)" % e) else: # Append temperature self.stats.extend(temperature) # Get the FAN speed try: fan_speed = self.__set_type( self.glancesgrabsensors.get('fan_speed'), 'fan_speed') except Exception as e: logger.error("Cannot grab FAN speed (%s)" % e) else: # Append FAN speed self.stats.extend(fan_speed) # Update HDDtemp stats try: hddtemp = self.__set_type(self.hddtemp_plugin.update(), 'temperature_hdd') except Exception as e: logger.error("Cannot grab HDD temperature (%s)" % e) else: # Append HDD temperature self.stats.extend(hddtemp) # Update batteries stats try: batpercent = self.__set_type(self.batpercent_plugin.update(), 'battery') except Exception as e: logger.error("Cannot grab battery percent (%s)" % e) else: # Append Batteries % self.stats.eglancesgrabsensorsxtend(batpercent) elif self.input_method == 'snmp': # Update stats using SNMP # No standard: # http://www.net-snmp.org/wiki/index.php/Net-SNMP_and_lm-sensors_on_Ubuntu_10.04 pass # Update the view # self.update_views() return self.stats
def connect(self, version=None):
    """Connect to the Docker server."""
    # Init connection to the Docker API
    try:
        if version is None:
            ret = docker.Client(base_url='unix://var/run/docker.sock')
        else:
            ret = docker.Client(base_url='unix://var/run/docker.sock',
                                version=version)
    except NameError:
        # docker lib not found
        return None

    try:
        ret.version()
    except requests.exceptions.ConnectionError as e:
        # Connection error (Docker not detected)
        # Keep this message in debug mode only
        logger.debug("Can't connect to the Docker server (%s)" % e)
        return None
    except docker.errors.APIError as e:
        if version is None:
            # API error (version mismatch?)
            logger.debug("Docker API error (%s)" % e)
            # Retry the connection with the server version
            version = re.search(r'server\:\ (.*)\)\".*\)', str(e))
            if version:
                logger.debug("Try connection with Docker API version %s" % version.group(1))
                ret = self.connect(version=version.group(1))
            else:
                logger.debug("Can not retrieve Docker server version")
                ret = None
        else:
            # API error
            logger.error("Docker API error (%s)" % e)
            ret = None
    except Exception as e:
        # Other exceptions...
        # Connection error (Docker not detected)
        logger.error("Can't connect to the Docker server (%s)" % e)
        ret = None

    # Log an info if the Docker plugin is disabled
    if ret is None:
        logger.debug("Docker plugin is disabled because an error has been detected")

    return ret
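# Sketch of the version-mismatch recovery above. The APIError text below is
# a hypothetical example of the message the regex is meant to scrape:
#
#     >>> import re
#     >>> msg = '500 Server Error ("client and server don\'t have same version (client : 1.15, server: 1.14)")'
#     >>> m = re.search(r'server\:\ (.*)\)\".*\)', msg)
#     >>> m.group(1)
#     '1.14'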
def get_docker_memory_old(self, container_id):
    """Return the container MEMORY usage by reading /sys/fs/cgroup/.

    Input: id is the full container id
    Output: a dict {'rss': 1015808, 'cache': 356352}
    """
    ret = {}
    # Read the stats
    try:
        with open('/sys/fs/cgroup/memory/docker/' + container_id + '/memory.stat', 'r') as f:
            for line in f:
                m = re.search(r"(rss|cache)\s+(\d+)", line)
                if m:
                    ret[m.group(1)] = int(m.group(2))
    except IOError as e:
        logger.error("Can not grab container MEM stat ({0})".format(e))
        return ret
    # Return the stats
    return ret
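# Parsing sketch for the memory.stat read above (the sample content is a
# hypothetical cgroup v1 excerpt):
#
#     >>> import re
#     >>> sample = "cache 356352\nrss 1015808\nmapped_file 0\n"
#     >>> ret = {}
#     >>> for line in sample.splitlines():
#     ...     m = re.search(r"(rss|cache)\s+(\d+)", line)
#     ...     if m:
#     ...         ret[m.group(1)] = int(m.group(2))
#     >>> ret == {'cache': 356352, 'rss': 1015808}
#     True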
def get_stats_item(self, item):
    """Return the stats object for a specific item in JSON format.

    Stats should be a list of dicts (processlist, network...).
    """
    if isinstance(self.stats, dict):
        try:
            return json.dumps({item: self.stats[item]})
        except KeyError as e:
            logger.error("Cannot get item {0} ({1})".format(item, e))
            return None
    elif isinstance(self.stats, list):
        try:
            # Source:
            # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list
            return json.dumps({item: map(itemgetter(item), self.stats)})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item {0} ({1})".format(item, e))
            return None
    else:
        return None
def get_docker_cpu_old(self, container_id):
    """Return the container CPU usage by reading /sys/fs/cgroup/.

    Input: id is the full container id
    Output: a dict {'total': 1.49, 'user': 0.65, 'system': 0.84}
    """
    ret = {}
    # Read the stats
    try:
        with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/cpuacct.stat', 'r') as f:
            for line in f:
                m = re.search(r"(system|user)\s+(\d+)", line)
                if m:
                    ret[m.group(1)] = int(m.group(2))
    except IOError as e:
        logger.error("Can not grab container CPU stat ({0})".format(e))
        return ret
    # Compute the total only if both values were parsed
    if isinstance(ret.get("system"), numbers.Number) and isinstance(ret.get("user"), numbers.Number):
        ret["total"] = ret["system"] + ret["user"]
    # Return the stats
    return ret
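# Note: cpuacct.stat reports user/system time as integer ticks (USER_HZ);
# any conversion to seconds happens elsewhere. Total computation sketch
# (hypothetical values):
#
#     >>> ret = {'user': 65, 'system': 84}
#     >>> ret['total'] = ret['system'] + ret['user']
#     >>> ret['total']
#     149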
def run(self, stat_name, criticity, commands, mustache_dict=None):
    """Run the commands (in background).

    - stat_name: plugin name (+ header)
    - criticity: criticity of the trigger
    - commands: a list of command lines with optional {{mustache}} tags
    - mustache_dict: plugin stats (can be used within {{mustache}})

    Return True if the commands have been run.
    """
    if self.get(stat_name) == criticity:
        # Action already executed => exit
        return False

    logger.debug("Run action {0} for {1} ({2}) with stats {3}".format(
        commands, stat_name, criticity, mustache_dict))

    # Run all actions in background
    for cmd in commands:
        # Replace {{arg}} by the dict one (thanks to Mustache)
        if pystache_tag:
            cmd_full = pystache.render(cmd, mustache_dict)
        else:
            cmd_full = cmd
        # Execute the action
        logger.info("Action triggered for {0} ({1}): {2}".format(
            stat_name, criticity, cmd_full))
        logger.debug(
            "Stats value for the trigger: {0}".format(mustache_dict))
        try:
            Popen(cmd_full, shell=True)
        except OSError as e:
            logger.error("Can't execute the action ({0})".format(e))

    self.set(stat_name, criticity)

    return True
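# Mustache substitution sketch (hypothetical template and stats), showing
# what pystache.render does to a command line before it is passed to Popen:
#
#     >>> import pystache
#     >>> pystache.render('notify-send "{{name}} is at {{percent}}%"',
#     ...                 {'name': 'cpu', 'percent': 95})
#     # -> 'notify-send "cpu is at 95%"'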
def update(self): """Update Docker stats using the input method.""" # Reset stats self.reset() # Get the current Docker API client if not self.docker_client: # First time, try to connect to the server self.docker_client = self.connect() if self.docker_client is None: global docker_tag docker_tag = False # The Docker-py lib is mandatory if not docker_tag or (self.args is not None and self.args.disable_docker): return self.stats if self.input_method == 'local': # Update stats # Exemple: { # "KernelVersion": "3.16.4-tinycore64", # "Arch": "amd64", # "ApiVersion": "1.15", # "Version": "1.3.0", # "GitCommit": "c78088f", # "Os": "linux", # "GoVersion": "go1.3.3" # } self.stats['version'] = self.docker_client.version() # Example: [{u'Status': u'Up 36 seconds', # u'Created': 1420378904, # u'Image': u'nginx:1', # u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443}, # {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}], # u'Command': u"nginx -g 'daemon off;'", # u'Names': [u'/webstack_nginx_1'], # u'Id': u'b0da859e84eb4019cf1d965b15e9323006e510352c402d2f442ea632d61faaa5'}] self.stats['containers'] = self.docker_client.containers() # Get stats for all containers for c in self.stats['containers']: if not hasattr(self, 'docker_stats'): # Create a dict with all the containers' stats instance self.docker_stats = {} # TODO: Find a way to correct this # The following optimization is not compatible with the network stats # The self.docker_client.stats method should be call every time in order to have network stats refreshed # Nevertheless, if we call it every time, unicon is slow... if c['Id'] not in self.docker_stats: # Create the stats instance for the current container try: self.docker_stats[c['Id']] = self.docker_client.stats(c['Id'], decode=True) logger.debug("Create Docker stats object for container {}".format(c['Id'])) except Exception as e: # Correct Issue #602 logger.error("Can not call Docker stats method {}".format(e)) # Get the docker stats try: # self.docker_stats[c['Id']] = self.docker_client.stats(c['Id'], decode=True) all_stats = self.docker_stats[c['Id']].next() except Exception: all_stats = {} c['cpu'] = self.get_docker_cpu(c['Id'], all_stats) c['memory'] = self.get_docker_memory(c['Id'], all_stats) # c['network'] = self.get_docker_network(c['Id'], all_stats) elif self.input_method == 'snmp': # Update stats using SNMP # Not available pass return self.stats
def update(self): """Update the processes stats.""" # Reset the stats self.processlist = [] self.processcount = { 'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0 } # Do not process if disable tag is set if self.disable_tag: return # Get the time since last update time_since_update = getTimeSinceLastUpdate('process_disk') # Build an internal dict with only mandatories stats (sort keys) processdict = {} for proc in psutil.process_iter(): # Ignore kernel threads if needed if self.no_kernel_threads and not is_windows and is_kernel_thread( proc): continue # If self.max_processes is None: Only retreive mandatory stats # Else: retreive mandatory and standard stats s = self.__get_process_stats( proc, mandatory_stats=True, standard_stats=self.max_processes is None) # Continue to the next process if it has to be filtered if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])): continue # Ok add the process to the list processdict[proc] = s # ignore the 'idle' process on Windows and *BSD # ignore the 'kernel_task' process on OS X # waiting for upstream patch from psutil if (is_bsd and processdict[proc]['name'] == 'idle' or is_windows and processdict[proc]['name'] == 'System Idle Process' or is_mac and processdict[proc]['name'] == 'kernel_task'): continue # Update processcount (global statistics) try: self.processcount[str(proc.status())] += 1 except KeyError: # Key did not exist, create it try: self.processcount[str(proc.status())] = 1 except psutil.NoSuchProcess: pass except psutil.NoSuchProcess: pass else: self.processcount['total'] += 1 # Update thread number (global statistics) try: self.processcount['thread'] += proc.num_threads() except Exception: pass if self._enable_tree: self.process_tree = ProcessTreeNode.build_tree( processdict, self.sort_key, self.sort_reverse, self.no_kernel_threads) for i, node in enumerate(self.process_tree): # Only retreive stats for visible processes (max_processes) if self.max_processes is not None and i >= self.max_processes: break # add standard stats new_stats = self.__get_process_stats(node.process, mandatory_stats=False, standard_stats=True, extended_stats=False) if new_stats is not None: node.stats.update(new_stats) # Add a specific time_since_update stats for bitrate node.stats['time_since_update'] = time_since_update else: # Process optimization # Only retreive stats for visible processes (max_processes) if self.max_processes is not None: # Sort the internal dict and cut the top N (Return a list of tuple) # tuple=key (proc), dict (returned by __get_process_stats) try: processiter = sorted(processdict.items(), key=lambda x: x[1][self.sort_key], reverse=self.sort_reverse) except (KeyError, TypeError) as e: logger.error("Cannot sort process list by {0}: {1}".format( self.sort_key, e)) logger.error("%s" % str(processdict.items()[0])) # Fallback to all process (issue #423) processloop = processdict.items() first = False else: processloop = processiter[0:self.max_processes] first = True else: # Get all processes stats processloop = processdict.items() first = False for i in processloop: # Already existing mandatory stats procstat = i[1] if self.max_processes is not None: # Update with standard stats # and extended stats but only for TOP (first) process s = self.__get_process_stats(i[0], mandatory_stats=False, standard_stats=True, extended_stats=first) if s is None: continue procstat.update(s) # Add a specific time_since_update stats for bitrate procstat['time_since_update'] = time_since_update # Update process list 
self.processlist.append(procstat) # Next... first = False # Build the all processes list used by the monitored list self.allprocesslist = processdict.values() # Clean internals caches if timeout is reached if self.cache_timer.finished(): self.username_cache = {} self.cmdline_cache = {} # Restart the timer self.cache_timer.reset()
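# Sort-and-slice sketch used in the non-tree branch above (hypothetical
# stats): the internal dict is ordered by the sort key, then cut to the
# top max_processes entries:
#
#     >>> processdict = {'p1': {'cpu_percent': 10.0},
#     ...                'p2': {'cpu_percent': 90.0},
#     ...                'p3': {'cpu_percent': 50.0}}
#     >>> sorted(processdict.items(),
#     ...        key=lambda x: x[1]['cpu_percent'],
#     ...        reverse=True)[0:2]
#     [('p2', {'cpu_percent': 90.0}), ('p3', {'cpu_percent': 50.0})]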