def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    logger.debug("Export {} stats to InfluxDB".format(name))
    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name
    # Create DB input
    if self.version == INFLUXDB_08:
        data = [{'name': name, 'columns': columns, 'points': [points]}]
    else:
        # Convert all int to float (mandatory for InfluxDB > 0.9.2)
        # Correct issue #750 and issue #749
        for i, _ in enumerate(points):
            try:
                points[i] = float(points[i])
            except (TypeError, ValueError) as e:
                logger.debug("InfluxDB error during stat conversion %s=%s (%s)" % (columns[i], points[i], e))
        data = [{'measurement': name,
                 'tags': self.parse_tags(self.tags),
                 'fields': dict(zip(columns, points))}]
    # Write input to the InfluxDB database
    try:
        self.client.write_points(data)
    except Exception as e:
        logger.error("Cannot export {} stats to InfluxDB ({})".format(name, e))
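
# A minimal standalone sketch of the payload shape the InfluxDB >= 0.9 branch
# above builds for write_points(); host, database and stats values are
# illustrative, not taken from the Glances configuration.
from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='glances')
data = [{
    'measurement': 'cpu',
    'tags': {'hostname': 'myhost'},
    # All values as float (see issues #749/#750 referenced above)
    'fields': {'user': 10.5, 'system': 2.1},
}]
client.write_points(data)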
def get_stats_history(self, item=None, nb=0):
    """Return the stats history as a JSON object (dict or None).

    Limit to the last nb items (all if nb=0).
    """
    s = self.get_json_history(nb=nb)
    if item is None:
        return self._json_dumps(s)
    if isinstance(s, dict):
        try:
            return self._json_dumps({item: s[item]})
        except KeyError as e:
            logger.error("Cannot get item history {0} ({1})".format(item, e))
            return None
    elif isinstance(s, list):
        try:
            # Source:
            # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list
            # list() so the result stays JSON-serializable on Python 3
            return self._json_dumps({item: list(map(itemgetter(item), s))})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item history {0} ({1})".format(item, e))
            return None
    else:
        return None
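
# Standalone illustration of the list branch above (the history values are
# invented): a per-item history is flattened into {item: [values...]}.
from operator import itemgetter
import json

s = [{'cpu': 1.5, 'mem': 42}, {'cpu': 2.0, 'mem': 43}]
print(json.dumps({'cpu': list(map(itemgetter('cpu'), s))}))  # {"cpu": [1.5, 2.0]}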
def filter(self, value):
    """Set the filter (as a string) and compute the regular expression.

    A filter can be one of the following:
    - python > process name or cmd line starts with python
    - .*python.* > process name or cmd line contains python
    - username:nicolargo > processes of the nicolargo user
    """
    self._filter_input = value
    if value is None:
        self._filter = None
        self._filter_key = None
    else:
        new_filter = value.split(':')
        if len(new_filter) == 1:
            self._filter = new_filter[0]
            self._filter_key = None
        else:
            self._filter = new_filter[1]
            self._filter_key = new_filter[0]
    self._filter_re = None
    if self.filter is not None:
        logger.info("Set filter to {} on key {}".format(self.filter, self.filter_key))
        # Compute the regular expression
        try:
            self._filter_re = re.compile(self.filter)
            logger.debug("Filter regex compilation OK: {}".format(self.filter))
        except Exception as e:
            logger.error("Cannot compile filter regex: {} ({})".format(self.filter, e))
            self._filter = None
            self._filter_re = None
            self._filter_key = None
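
# Standalone illustration of the parsing rule above: a 'key:value' filter
# splits into (filter_key, pattern), otherwise the whole string is the
# pattern (filter strings below are the docstring's own examples).
import re

for value in ['python', '.*python.*', 'username:nicolargo']:
    parts = value.split(':')
    key, pattern = (None, parts[0]) if len(parts) == 1 else (parts[0], parts[1])
    regex = re.compile(pattern)  # raises re.error on a bad pattern
    print(key, pattern, bool(regex.search('python3')))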
def export(self, name, columns, points):
    """Write the points to the ES server."""
    logger.debug("Export {} stats to ElasticSearch".format(name))
    # Create DB input
    # https://elasticsearch-py.readthedocs.io/en/master/helpers.html
    actions = []
    for c, p in zip(columns, points):
        action = {
            "_index": self.index,
            "_type": name,
            "_id": c,
            "_source": {
                "value": str(p),
                "timestamp": datetime.now()
            }
        }
        actions.append(action)
    # Write input to the ES index
    try:
        helpers.bulk(self.client, actions)
    except Exception as e:
        logger.error("Cannot export {} stats to ElasticSearch ({})".format(name, e))
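
# A minimal sketch of the bulk indexing call used above; the host URL and
# index name are illustrative, and client construction varies across
# elasticsearch-py versions.
from datetime import datetime
from elasticsearch import Elasticsearch, helpers

es = Elasticsearch(['http://localhost:9200'])
actions = [{'_index': 'glances', '_type': 'cpu', '_id': 'user',
            '_source': {'value': '10.5', 'timestamp': datetime.now()}}]
helpers.bulk(es, actions)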
def __set_folder_list(self, section):
    """Init the monitored folder list.

    The list is defined in the Glances configuration file.
    """
    for l in range(1, self.__folder_list_max_size + 1):
        value = {}
        key = 'folder_' + str(l) + '_'
        # Path is mandatory
        try:
            value['path'] = self.config.get_value(section, key + 'path')
        except Exception as e:
            logger.error("Cannot read folder list: {}".format(e))
            continue
        if value['path'] is None:
            continue
        # Optional conf keys
        for i in ['careful', 'warning', 'critical']:
            try:
                value[i] = self.config.get_value(section, key + i)
            except Exception:
                value[i] = None
                logger.debug("No {} threshold for folder {}".format(i, value["path"]))
        # Add the item to the list
        self.__folder_list.append(value)
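
# The keys read above map to a [folders] section of this shape in the
# Glances configuration file (the paths and byte thresholds below are
# examples, not defaults):
#
#     [folders]
#     folder_1_path=/var/log
#     folder_1_careful=100000000
#     folder_1_warning=1000000000
#     folder_1_critical=10000000000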
def run(self, stat_name, criticity, commands, mustache_dict=None):
    """Run the commands (in background).

    - stat_name: plugin name (+ header)
    - criticity: criticity of the trigger
    - commands: a list of command lines with optional {{mustache}} tags
    - mustache_dict: plugin stats (can be used within {{mustache}})

    Return True if the commands have been run.
    """
    if self.get(stat_name) == criticity or not self.start_timer.finished():
        # Action already executed => Exit
        return False

    logger.debug("Run action {} for {} ({}) with stats {}".format(commands, stat_name, criticity, mustache_dict))

    # Run all actions in background
    for cmd in commands:
        # Replace {{arg}} by the dict one (thanks to {{Mustache}})
        if pystache_tag:
            cmd_full = pystache.render(cmd, mustache_dict)
        else:
            cmd_full = cmd
        # Execute the action
        logger.info("Action triggered for {} ({}): {}".format(stat_name, criticity, cmd_full))
        logger.debug("Stats value for the trigger: {}".format(mustache_dict))
        try:
            Popen(cmd_full, shell=True)
        except OSError as e:
            logger.error("Can't execute the action ({})".format(e))

    self.set(stat_name, criticity)
    return True
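
# Standalone sketch of the {{mustache}} substitution performed above; the
# command template and stats values are invented for illustration.
import pystache

cmd = 'notify-send "cpu is {{user}}%"'
print(pystache.render(cmd, {'user': 85.2}))  # notify-send "cpu is 85.2%"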
def load(self, config):
    """Load the server list from the configuration file."""
    server_list = []

    if config is None:
        logger.debug("No configuration file available. Cannot load server list.")
    elif not config.has_section(self._section):
        logger.warning("No [%s] section in the configuration file. Cannot load server list." % self._section)
    else:
        logger.info("Start reading the [%s] section in the configuration file" % self._section)
        for i in range(1, 256):
            new_server = {}
            postfix = 'server_%s_' % str(i)
            # Read the server name (mandatory)
            for s in ['name', 'port', 'alias']:
                new_server[s] = config.get_value(self._section, '%s%s' % (postfix, s))
            if new_server['name'] is not None:
                # Manage optional information
                if new_server['port'] is None:
                    new_server['port'] = '61209'
                new_server['username'] = '******'
                # By default, try an empty (aka no) password
                new_server['password'] = ''
                try:
                    new_server['ip'] = gethostbyname(new_server['name'])
                except gaierror as e:
                    logger.error("Cannot get IP address for server %s (%s)" % (new_server['name'], e))
                    continue
                new_server['key'] = new_server['name'] + ':' + new_server['port']
                # Default status is 'UNKNOWN'
                new_server['status'] = 'UNKNOWN'
                # Server type is 'STATIC'
                new_server['type'] = 'STATIC'
                # Add the server to the list
                logger.debug("Add server %s to the static list" % new_server['name'])
                server_list.append(new_server)
        # Server list loaded
        logger.info("%s server(s) loaded from the configuration file" % len(server_list))
        logger.debug("Static server list: %s" % server_list)

    return server_list
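
# The loop above expects configuration keys of this shape, in whatever
# section self._section names (a [serverlist] section name and the host
# values below are illustrative; port defaults to 61209 when omitted):
#
#     [serverlist]
#     server_1_name=xps
#     server_1_port=61209
#     server_2_name=192.168.0.17
#     server_2_alias=nas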
def get_item_key(self, item):
    """Return the value of the item 'key'."""
    try:
        ret = item[item['key']]
    except KeyError:
        logger.error("No 'key' available in {}".format(item))
        # Without this return, ret would be unbound below
        return None
    if isinstance(ret, list):
        return ret[0]
    else:
        return ret
def export(self, name, columns, points):
    """Export the stats to the Statsd server."""
    for i in range(len(columns)):
        if not isinstance(points[i], Number):
            continue
        stat_name = '{}.{}'.format(name, columns[i])
        stat_value = points[i]
        try:
            self.client.gauge(stat_name, stat_value)
        except Exception as e:
            logger.error("Cannot export stats to Statsd (%s)" % e)
    logger.debug("Export {} stats to Statsd".format(name))
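
# A minimal sketch of the gauge call used above, with the statsd package;
# host, port, prefix and the stat name/value are illustrative.
import statsd

client = statsd.StatsClient('localhost', 8125, prefix='glances')
client.gauge('cpu.user', 10.5)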
def _init_history(self):
    """Init the history option."""
    self.reset_history_tag = False
    self.graph_tag = False
    if self.args.export_graph:
        logger.info('Export graphs function enabled with output path %s' % self.args.path_graph)
        from glances.exports.graph import GlancesGraph
        self.glances_graph = GlancesGraph(self.args.path_graph)
        if not self.glances_graph.graph_enabled():
            self.args.export_graph = False
            logger.error('Export graphs disabled')
def export(self, name, columns, points):
    """Export the stats to the OpenTSDB server."""
    for i in range(len(columns)):
        if not isinstance(points[i], Number):
            continue
        stat_name = '{}.{}.{}'.format(self.prefix, name, columns[i])
        stat_value = points[i]
        tags = self.parse_tags(self.tags)
        try:
            self.client.send(stat_name, stat_value, **tags)
        except Exception as e:
            logger.error("Cannot export stats %s to OpenTSDB (%s)" % (name, e))
    logger.debug("Export {} stats to OpenTSDB".format(name))
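
# A hedged sketch of the send() call above, assuming a potsdb-style client
# that exposes send(name, value, **tags); host, metric name and the tag are
# illustrative.
import potsdb

client = potsdb.Client('localhost', port=4242)
client.send('glances.cpu.user', 10.5, host='myhost')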
def connect(self, version=None):
    """Connect to the Docker server."""
    # Init connection to the Docker API
    try:
        if version is None:
            ret = docker.Client(base_url='unix://var/run/docker.sock')
        else:
            ret = docker.Client(base_url='unix://var/run/docker.sock', version=version)
    except NameError:
        # docker lib not found
        return None

    try:
        ret.version()
    except requests.exceptions.ConnectionError as e:
        # Connection error (Docker not detected)
        # Leave this message in debug mode
        logger.debug("Can't connect to the Docker server (%s)" % e)
        return None
    except docker.errors.APIError as e:
        if version is None:
            # API error (version mismatch?)
            logger.debug("Docker API error (%s)" % e)
            # Try the connection with the server version
            version = re.search(r'(?:server API version|server)\:\ (.*)\)\".*\)', str(e))
            if version:
                logger.debug("Try connection with Docker API version %s" % version.group(1))
                ret = self.connect(version=version.group(1))
            else:
                logger.debug("Cannot retrieve Docker server version")
                ret = None
        else:
            # API error
            logger.error("Docker API error (%s)" % e)
            ret = None
    except Exception as e:
        # Other exceptions...
        # Connection error (Docker not detected)
        logger.error("Can't connect to the Docker server (%s)" % e)
        ret = None

    # Log an info if the Docker plugin is disabled
    if ret is None:
        logger.debug("Docker plugin is disabled because an error has been detected")

    return ret
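
# Standalone check of the version-extraction regex above against an
# APIError message of the typical client/server mismatch shape (the message
# text below is illustrative, not captured from a real daemon):
import re

msg = ('400 Client Error: Bad Request ("client is newer than server '
       '(client API version: 1.24, server API version: 1.21)")')
m = re.search(r'(?:server API version|server)\:\ (.*)\)\".*\)', msg)
print(m.group(1) if m else None)  # 1.21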
def __init__(self, config):
    """Init the folder list from the configuration file, if it exists."""
    self.config = config
    # Always init the list so __set_folder_list() can append to it
    self.__folder_list = []
    if self.config is not None and self.config.has_section('folders'):
        if scandir_tag:
            # Process monitoring list
            logger.debug("Folder list configuration detected")
            self.__set_folder_list('folders')
        else:
            logger.error('Scandir not found. Please use Python 3.5+ or install the scandir lib')
def export(self, name, columns, points):
    """Write the points in RabbitMQ."""
    data = ('hostname=' + self.hostname + ', name=' + name +
            ', dateinfo=' + datetime.datetime.utcnow().isoformat())
    for i in range(len(columns)):
        if not isinstance(points[i], Number):
            continue
        data += ", " + columns[i] + "=" + str(points[i])
    logger.debug(data)
    try:
        self.client.basic_publish(exchange='', routing_key=self.rabbitmq_queue, body=data)
    except Exception as e:
        logger.error("Cannot export stats to RabbitMQ (%s)" % e)
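
# A minimal sketch of the publish call above using pika; connection
# parameters, queue name and the message body are illustrative.
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='glances_queue')
channel.basic_publish(exchange='', routing_key='glances_queue',
                      body='hostname=myhost, name=cpu, user=10.5')
connection.close()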
def export(self, name, columns, points):
    """Write the points in Riemann."""
    for i in range(len(columns)):
        if not isinstance(points[i], Number):
            continue
        data = {'host': self.hostname,
                'service': name + " " + columns[i],
                'metric': points[i]}
        logger.debug(data)
        try:
            self.client.send(data)
        except Exception as e:
            logger.error("Cannot export stats to Riemann (%s)" % e)
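
# A hedged sketch of the send() call above, assuming the bernhard Riemann
# client; host, port and the event values are illustrative.
import bernhard

client = bernhard.Client(host='localhost', port=5555)
client.send({'host': 'myhost', 'service': 'cpu user', 'metric': 10.5})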
def update(self):
    """Update sensors stats using the input method."""
    # Reset the stats
    self.reset()

    if self.input_method == 'local':
        # Update stats using the dedicated lib
        self.stats = []
        # Get the temperature
        try:
            temperature = self.__set_type(
                self.glancesgrabsensors.get('temperature_core'),
                'temperature_core')
        except Exception as e:
            logger.error("Cannot grab sensors temperatures (%s)" % e)
        else:
            # Append temperature
            self.stats.extend(temperature)
        # Get the FAN speed
        try:
            fan_speed = self.__set_type(
                self.glancesgrabsensors.get('fan_speed'), 'fan_speed')
        except Exception as e:
            logger.error("Cannot grab FAN speed (%s)" % e)
        else:
            # Append FAN speed
            self.stats.extend(fan_speed)
        # Update HDDtemp stats
        try:
            hddtemp = self.__set_type(self.hddtemp_plugin.update(),
                                      'temperature_hdd')
        except Exception as e:
            logger.error("Cannot grab HDD temperature (%s)" % e)
        else:
            # Append HDD temperature
            self.stats.extend(hddtemp)
        # Update batteries stats
        try:
            batpercent = self.__set_type(self.batpercent_plugin.update(),
                                         'battery')
        except Exception as e:
            logger.error("Cannot grab battery percent (%s)" % e)
        else:
            # Append batteries %
            self.stats.extend(batpercent)
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # No standard:
        # http://www.net-snmp.org/wiki/index.php/Net-SNMP_and_lm-sensors_on_Ubuntu_10.04
        pass

    # Update the view
    self.update_views()

    return self.stats
def get_stats_value(self, item, value):
    """Return the stats object for a specific item=value in JSON format.

    Stats should be a list of dicts (processlist, network...).
    """
    if not isinstance(self.stats, list):
        return None
    if value.isdigit():
        value = int(value)
    try:
        return self._json_dumps(
            {value: [i for i in self.stats if i[item] == value]})
    except (KeyError, ValueError) as e:
        logger.error("Cannot get item({})=value({}) ({})".format(item, value, e))
        return None
def export(self, name, columns, points):
    """Write the points to the Cassandra cluster."""
    logger.debug("Export {} stats to Cassandra".format(name))

    # Remove non-number stats and convert all to float (for booleans)
    # items() instead of Python-2-only iteritems() for Python 3 compatibility
    data = {k: float(v)
            for (k, v) in dict(zip(columns, points)).items()
            if isinstance(v, Number)}

    # Write input to the Cassandra table
    try:
        self.session.execute(
            """
            INSERT INTO localhost (plugin, time, stat)
            VALUES (%s, %s, %s)
            """,
            (name, uuid_from_time(datetime.now()), data))
    except Exception as e:
        logger.error("Cannot export {} stats to Cassandra ({})".format(name, e))
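
# The INSERT above assumes a table of roughly this shape (the column types
# are an assumption inferred from the statement and the stat dict):
#
#     CREATE TABLE localhost (plugin text, time timeuuid,
#                             stat map<text,float>,
#                             PRIMARY KEY (plugin, time))
#
# Minimal driver-side setup sketch; contact point and keyspace are illustrative.
from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'])
session = cluster.connect('glances')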
def get_stats_item(self, item):
    """Return the stats object for a specific item in JSON format.

    Stats should be a list of dicts (processlist, network...).
    """
    if isinstance(self.stats, dict):
        try:
            return self._json_dumps({item: self.stats[item]})
        except KeyError as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    elif isinstance(self.stats, list):
        try:
            # Source:
            # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list
            # list() so the result stays JSON-serializable on Python 3
            return self._json_dumps(
                {item: list(map(itemgetter(item), self.stats))})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    else:
        return None
def update(self):
    """Update the processes stats."""
    # Reset the stats
    self.processlist = []
    self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Get the time since last update
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Build an internal dict with only mandatory stats (sort keys)
    processdict = {}
    excluded_processes = set()
    for proc in psutil.process_iter():
        # Ignore kernel threads if needed
        if self.no_kernel_threads and is_kernel_thread(proc):
            continue

        # If self.max_processes is None: only retrieve mandatory stats
        # Else: retrieve mandatory and standard stats
        s = self.__get_process_stats(proc,
                                     mandatory_stats=True,
                                     standard_stats=self.max_processes is None)
        # Check if s is not None (issue #879)
        # waiting for an upstream patch from psutil
        if s is None:
            continue
        # Continue to the next process if it has to be filtered
        if self._filter.is_filtered(s):
            excluded_processes.add(proc)
            continue

        # OK, add the process to the list
        processdict[proc] = s
        # Update processcount (global statistics)
        try:
            self.processcount[str(proc.status())] += 1
        except KeyError:
            # Key did not exist, create it
            try:
                self.processcount[str(proc.status())] = 1
            except psutil.NoSuchProcess:
                pass
        except psutil.NoSuchProcess:
            pass
        else:
            self.processcount['total'] += 1
        # Update thread number (global statistics)
        try:
            self.processcount['thread'] += proc.num_threads()
        except Exception:
            pass

    if self._enable_tree:
        self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                       self.sort_key,
                                                       self.sort_reverse,
                                                       self.no_kernel_threads,
                                                       excluded_processes)

        for i, node in enumerate(self.process_tree):
            # Only retrieve stats for visible processes (max_processes)
            if self.max_processes is not None and i >= self.max_processes:
                break

            # Add standard stats
            new_stats = self.__get_process_stats(node.process,
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=False)
            if new_stats is not None:
                node.stats.update(new_stats)

            # Add a specific time_since_update stats for bitrate
            node.stats['time_since_update'] = time_since_update
    else:
        # Process optimization
        # Only retrieve stats for visible processes (max_processes)
        if self.max_processes is not None:
            # Sort the internal dict and cut the top N
            # (returns a list of tuples: key (proc), dict (returned by __get_process_stats))
            try:
                processiter = sorted(iteritems(processdict),
                                     key=lambda x: x[1][self.sort_key],
                                     reverse=self.sort_reverse)
            except (KeyError, TypeError) as e:
                logger.error("Cannot sort process list by {}: {}".format(self.sort_key, e))
                logger.error('{}'.format(listitems(processdict)[0]))
                # Fallback to all processes (issue #423)
                processloop = iteritems(processdict)
                first = False
            else:
                processloop = processiter[0:self.max_processes]
                first = True
        else:
            # Get all processes stats
            processloop = iteritems(processdict)
            first = False

        for i in processloop:
            # Already existing mandatory stats
            procstat = i[1]
            if self.max_processes is not None:
                # Update with standard stats
                # and extended stats, but only for the top (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
            # Add a specific time_since_update stats for bitrate
            procstat['time_since_update'] = time_since_update
            # Update process list
            self.processlist.append(procstat)
            # Next...
            first = False

    # Build the all-processes list used by the AMPs
    self.allprocesslist = [p for p in itervalues(processdict)]

    # Clean internal caches if the timeout is reached
    if self.cache_timer.finished():
        self.username_cache = {}
        self.cmdline_cache = {}
        # Restart the timer
        self.cache_timer.reset()
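
# Standalone illustration of the top-N selection above: sort the internal
# dict by a stats key and keep the first max_processes entries (the pids and
# cpu_percent values are invented).
processdict = {1: {'cpu_percent': 5.0}, 2: {'cpu_percent': 40.0}, 3: {'cpu_percent': 12.0}}
top = sorted(processdict.items(), key=lambda x: x[1]['cpu_percent'], reverse=True)[:2]
print([pid for pid, _ in top])  # [2, 3]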
def update(self):
    """Update Docker stats using the input method."""
    # Reset stats
    self.reset()

    # Get the current Docker API client
    if not self.docker_client:
        # First time, try to connect to the server
        self.docker_client = self.connect()
        if self.docker_client is None:
            global docker_tag
            docker_tag = False

    # The docker-py lib is mandatory
    if not docker_tag or (self.args is not None and self.args.disable_docker):
        return self.stats

    if self.input_method == 'local':
        # Update stats

        # Docker version
        # Example: {
        #     "KernelVersion": "3.16.4-tinycore64",
        #     "Arch": "amd64",
        #     "ApiVersion": "1.15",
        #     "Version": "1.3.0",
        #     "GitCommit": "c78088f",
        #     "Os": "linux",
        #     "GoVersion": "go1.3.3"
        # }
        try:
            self.stats['version'] = self.docker_client.version()
        except Exception as e:
            # Correct issue #649
            logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e))
            return self.stats

        # Container global information
        # Example: [{u'Status': u'Up 36 seconds',
        #            u'Created': 1420378904,
        #            u'Image': u'nginx:1',
        #            u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443},
        #                       {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}],
        #            u'Command': u"nginx -g 'daemon off;'",
        #            u'Names': [u'/webstack_nginx_1'],
        #            u'Id': u'b0da859e84eb4019cf1d965b15e9323006e510352c402d2f442ea632d61faaa5'}]

        # Update the current containers list
        try:
            self.stats['containers'] = self.docker_client.containers() or []
        except Exception as e:
            logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e))
            return self.stats

        # Start a new thread for each new container
        for container in self.stats['containers']:
            if container['Id'] not in self.thread_list:
                # Thread did not exist in the internal dict
                # Create it and add it to the internal dict
                logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container['Id'][:12]))
                t = ThreadDockerGrabber(self.docker_client, container['Id'])
                self.thread_list[container['Id']] = t
                t.start()

        # Stop threads for non-existing containers
        nonexisting_containers = set(iterkeys(self.thread_list)) - set([c['Id'] for c in self.stats['containers']])
        for container_id in nonexisting_containers:
            # Stop the thread
            logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
            self.thread_list[container_id].stop()
            # Delete the item from the dict
            del self.thread_list[container_id]

        # Get stats for all containers
        for container in self.stats['containers']:
            # The key is the container name, not the Id
            container['key'] = self.get_key()
            # Export name (first name in the list, without the leading /)
            container['name'] = container['Names'][0][1:]
            container['cpu'] = self.get_docker_cpu(container['Id'], self.thread_list[container['Id']].stats)
            container['memory'] = self.get_docker_memory(container['Id'], self.thread_list[container['Id']].stats)
            container['network'] = self.get_docker_network(container['Id'], self.thread_list[container['Id']].stats)
            container['io'] = self.get_docker_io(container['Id'], self.thread_list[container['Id']].stats)

    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # Not available
        pass

    return self.stats