def update(self):
    """Update IP stats using the input method.

    Stats is a dict.
    """
    # Reset stats
    self.reset()

    if self.input_method == 'local' and netifaces_tag:
        # Update stats using the netifaces lib
        try:
            default_gw = netifaces.gateways()['default'][netifaces.AF_INET]
        except (KeyError, AttributeError) as e:
            logger.debug("Cannot grab the default gateway ({0})".format(e))
        else:
            try:
                self.stats['address'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['addr']
                self.stats['mask'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['netmask']
                self.stats['mask_cidr'] = self.ip_to_cidr(self.stats['mask'])
                self.stats['gateway'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
            except (KeyError, AttributeError) as e:
                logger.debug("Cannot grab IP information ({0})".format(e))

    elif self.input_method == 'snmp':
        # Not implemented yet
        pass

    # Update the view
    self.update_views()

    return self.stats
def load_conf(self, section="influxdb"):
    """Load the InfluxDB configuration in the Glances configuration file."""
    if self.config is None:
        return False
    try:
        self.host = self.config.get_value(section, 'host')
        self.port = self.config.get_value(section, 'port')
        self.user = self.config.get_value(section, 'user')
        self.password = self.config.get_value(section, 'password')
        self.db = self.config.get_value(section, 'db')
    except NoSectionError:
        logger.critical("No InfluxDB configuration found")
        return False
    except NoOptionError as e:
        logger.critical("Error in the InfluxDB configuration (%s)" % e)
        return False
    else:
        logger.debug("Load InfluxDB from the Glances configuration file")

    # Prefix is optional
    try:
        self.prefix = self.config.get_value(section, 'prefix')
    except NoOptionError:
        pass

    # Tags are optional, comma separated key:value pairs.
    try:
        self.tags = self.config.get_value(section, 'tags')
    except NoOptionError:
        self.tags = ''

    return True
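# A minimal sketch (not the shipped default) of the [influxdb] section that the
# loader above reads from the Glances configuration file: host/port/user/
# password/db are the mandatory keys fetched with get_value(), prefix and tags
# are the optional ones handled in the try/except blocks, and every value below
# is a placeholder.
#
#   [influxdb]
#   host=localhost
#   port=8086
#   user=root
#   password=root
#   db=glances
#   prefix=myhostname
#   tags=env:test,location:lab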
def __init__(self, config=None, args=None):
    # Init stats
    self.stats = GlancesStats(config=config, args=args)

    # Default number of processes to display is set to 50
    glances_processes.set_max_processes(50)

    # If process extended stats is disabled by the user
    if not args.enable_process_extended:
        logger.info("Extended stats for top process are disabled (default behavior)")
        glances_processes.disable_extended()
    else:
        logger.debug("Extended stats for top process are enabled")
        glances_processes.enable_extended()

    # Manage optional process filter
    if args.process_filter is not None:
        glances_processes.set_process_filter(args.process_filter)

    if (not is_windows) and args.no_kernel_threads:
        # Ignore kernel threads in process list
        glances_processes.disable_kernel_threads()

    if args.process_tree:
        # Enable process tree view
        glances_processes.enable_tree()

    # Initial system information update
    self.stats.update()

    # Init screen
    self.screen = GlancesCursesStandalone(args=args)
def load_exports(self, args=None):
    """Load all export modules in the 'exports' folder."""
    if args is None:
        return False
    header = "glances_"
    # Transform the arguments list into a dict
    # The aim is to check if the export module should be loaded
    args_var = vars(locals()['args'])
    for item in os.listdir(exports_path):
        export_name = os.path.basename(item)[len(header):-3].lower()
        if (item.startswith(header) and
                item.endswith(".py") and
                item != (header + "export.py") and
                item != (header + "history.py") and
                args_var['export_' + export_name] is not None and
                args_var['export_' + export_name] is not False):
            # Import the export module
            export_module = __import__(os.path.basename(item)[:-3])
            # Add the export to the dictionary
            # The key is the module name
            # for example, the file glances_xxx.py
            # generates self._exports["xxx"] = ...
            self._exports[export_name] = export_module.Export(args=args, config=self.config)
    # Log the available export modules
    logger.debug("Available export modules list: {0}".format(self.getAllExports()))
    return True
def __init__(self, config=None, args=None, timeout=7, return_to_browser=False):
    # Store the arg/config
    self.args = args
    self.config = config

    # Default client mode
    self._client_mode = 'glances'

    # Return to browser or exit
    self.return_to_browser = return_to_browser

    # Build the URI
    if args.password != "":
        uri = 'http://{0}:{1}@{2}:{3}'.format(args.username, args.password, args.client, args.port)
    else:
        uri = 'http://{0}:{1}'.format(args.client, args.port)
    logger.debug("Try to connect to {0}".format(uri))

    # Try to connect to the URI
    transport = GlancesClientTransport()
    # Configure the server timeout
    transport.set_timeout(timeout)
    try:
        self.client = ServerProxy(uri, transport=transport)
    except Exception as e:
        self.log_and_exit("Client couldn't create socket {0}: {1}".format(uri, e))
def update(self, servers_list):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    servers_list: Dict of dict with servers stats
    """
    # Flush display
    logger.debug("Servers list: {}".format(servers_list))
    self.flush(servers_list)

    # Wait
    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(servers_list)
        # Is it an exit or select server key ?
        exitkey = pressedkey == ord("\x1b") or pressedkey == ord("q") or pressedkey == 10
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(servers_list)
        # Wait 100ms...
        curses.napms(100)

    return self.active_server
def __init__(self, config=None, args=None, timeout=7, return_to_browser=False):
    # Store the arg/config
    self.args = args
    self.config = config

    # Client mode:
    self.set_mode()

    # Return to browser or exit
    self.return_to_browser = return_to_browser

    # Build the URI
    if args.password != "":
        uri = 'http://{0}:{1}@{2}:{3}'.format(args.username, args.password, args.client, args.port)
    else:
        uri = 'http://{0}:{1}'.format(args.client, args.port)
    logger.debug("Try to connect to {0}".format(uri))

    # Try to connect to the URI
    transport = GlancesClientTransport()
    # Configure the server timeout
    transport.set_timeout(timeout)
    try:
        self.client = ServerProxy(uri, transport=transport)
    except Exception as e:
        self.log_and_exit("Client couldn't create socket {0}: {1}".format(uri, e))
def update(self):
    """Update IP stats using the input method.

    Stats is a dict.
    """
    # Reset stats
    self.reset()

    if self.input_method == 'local' and netifaces_tag:
        # Update stats using the netifaces lib
        try:
            if 'freebsd' not in sys.platform:
                default_gw = netifaces.gateways()['default'][netifaces.AF_INET]
            else:
                raise KeyError('On FreeBSD, calling gateways() would segfault')
        except KeyError:
            logger.debug("Cannot grab the default gateway")
        else:
            try:
                self.stats['address'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['addr']
                self.stats['mask'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['netmask']
                self.stats['mask_cidr'] = self.ip_to_cidr(self.stats['mask'])
                if 'freebsd' not in sys.platform:
                    self.stats['gateway'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
            except KeyError as e:
                logger.debug("Cannot grab IP information ({0})".format(e))

    elif self.input_method == 'snmp':
        # Not implemented yet
        pass

    # Update the view
    self.update_views()

    return self.stats
def update(self):
    """Update IP stats using the input method.

    Stats is a dict.
    """
    # Reset stats
    self.reset()

    if self.input_method == 'local' and netifaces_tag:
        # Update stats using the netifaces lib
        try:
            default_gw = netifaces.gateways()['default'][netifaces.AF_INET]
        except KeyError:
            logger.debug("Cannot grab the default gateway")
        else:
            try:
                self.stats['address'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['addr']
                self.stats['mask'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['netmask']
                self.stats['mask_cidr'] = self.ip_to_cidr(self.stats['mask'])
                self.stats['gateway'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
            except KeyError as e:
                logger.debug("Cannot grab IP information ({0})".format(e))

    elif self.input_method == 'snmp':
        # Not implemented yet
        pass

    # Update the view
    self.update_views()

    return self.stats
def load_exports(self, args=None):
    """Load all export modules in the 'exports' folder."""
    if args is None:
        return False
    header = "glances_"
    # Transform the arguments list into a dict
    # The aim is to check if the export module should be loaded
    args_var = vars(locals()['args'])
    for item in os.listdir(exports_path):
        export_name = os.path.basename(item)[len(header):-3].lower()
        if (item.startswith(header) and
                item.endswith(".py") and
                item != (header + "export.py") and
                item != (header + "history.py") and
                args_var['export_' + export_name] is not None and
                args_var['export_' + export_name] is not False):
            # Import the export module
            export_module = __import__(os.path.basename(item)[:-3])
            # Add the export to the dictionary
            # The key is the module name
            # for example, the file glances_xxx.py
            # generates self._exports["xxx"] = ...
            self._exports[export_name] = export_module.Export(args=args, config=self.config)
    # Log the available export modules
    logger.debug("Available export modules list: {0}".format(self.getExportList()))
    return True
def load_conf(self, section="opentsdb"):
    """Load the OpenTSDB configuration in the Glances configuration file."""
    if self.config is None:
        return False
    try:
        self.host = self.config.get_value(section, 'host')
        self.port = self.config.get_value(section, 'port')
    except NoSectionError:
        logger.critical("No OpenTSDB configuration found")
        return False
    except NoOptionError as e:
        logger.critical("Error in the OpenTSDB configuration (%s)" % e)
        return False
    else:
        logger.debug("Load OpenTSDB from the Glances configuration file")

    # Prefix is optional
    try:
        self.prefix = self.config.get_value(section, 'prefix')
    except NoOptionError:
        pass

    # Tags are optional, comma separated key:value pairs.
    try:
        self.tags = self.config.get_value(section, 'tags')
    except NoOptionError:
        self.tags = ''

    return True
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    logger.debug("Export {0} stats to InfluxDB".format(name))
    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name
        # logger.info(self.prefix)
    # Create DB input
    new_col = []
    for col in columns:
        if col[0:2] == "/.":
            new_col.append(col[2:])
        else:
            new_col.append(col)
    if self.version == INFLUXDB_09:
        data = [{'measurement': name,
                 'tags': self.tags,
                 'fields': dict(zip(new_col, points))}]
    else:
        data = [{'name': name, 'columns': new_col, 'points': [points]}]
    # Write input to the InfluxDB database
    try:
        self.client.write_points(data)
    except Exception as e:
        logger.error("Cannot export {0} stats to InfluxDB ({1})".format(name, e))
def reset_stats_history(self):
    """Reset the stats history (dict of list)."""
    if self.args is not None and self.args.enable_history and self.get_items_history_list() is not None:
        reset_list = [i['name'] for i in self.get_items_history_list()]
        logger.debug("Reset history for plugin {0} (items: {1})".format(self.plugin_name, reset_list))
        self.stats_history = {}
def __init__(self, config=None, args=None):
    # Init stats
    self.stats = GlancesStats(config=config, args=args)

    # Default number of processes to display is set to 50
    glances_processes.set_max_processes(50)

    # If process extended stats is disabled by the user
    if not args.enable_process_extended:
        logger.info("Extended stats for top process are disabled (default behavior)")
        glances_processes.disable_extended()
    else:
        logger.debug("Extended stats for top process are enabled")
        glances_processes.enable_extended()

    # Manage optional process filter
    if args.process_filter is not None:
        glances_processes.set_process_filter(args.process_filter)

    if (not is_windows) and args.no_kernel_threads:
        # Ignore kernel threads in process list
        glances_processes.disable_kernel_threads()

    if args.process_tree:
        # Enable process tree view
        glances_processes.enable_tree()

    # Initial system information update
    self.stats.update()

    # Init screen
    self.screen = GlancesCursesStandalone(args=args)
def export(self, name, columns, points):
    """Write the points to the InfluxDB server."""
    logger.debug("Export {0} stats to InfluxDB".format(name))
    # Manage prefix
    if self.prefix is not None:
        name = self.prefix + '.' + name
        # logger.info(self.prefix)
    # Create DB input
    new_col = []
    for col in columns:
        if col[0:2] == "/.":
            new_col.append(col[2:])
        else:
            new_col.append(col)
    if self.version == INFLUXDB_09:
        data = [{'measurement': name,
                 'tags': self.tags,
                 'fields': dict(zip(new_col, points))}]
    else:
        data = [{'name': name, 'columns': new_col, 'points': [points]}]
    # Write input to the InfluxDB database
    try:
        self.client.write_points(data)
    except Exception as e:
        logger.error("Cannot export {0} stats to InfluxDB ({1})".format(name, e))
def update(self, servers_list):
    """Update the servers' list screen.

    Wait for __refresh_time sec / catch key every 100 ms.

    servers_list: Dict of dict with servers stats
    """
    # Flush display
    logger.debug('Servers list: {0}'.format(servers_list))
    self.flush(servers_list)

    # Wait
    exitkey = False
    countdown = Timer(self.__refresh_time)
    while not countdown.finished() and not exitkey:
        # Getkey
        pressedkey = self.__catch_key(servers_list)
        # Is it an exit or select server key ?
        exitkey = (pressedkey == ord('\x1b') or pressedkey == ord('q') or
                   pressedkey == 10)
        if not exitkey and pressedkey > -1:
            # Redraw display
            self.flush(servers_list)
        # Wait 100ms...
        curses.napms(100)

    return self.active_server
def reset_stats_history(self):
    """Reset the stats history (dict of list)."""
    if self.args is not None and self.args.enable_history and self.get_items_history_list() is not None:
        reset_list = [i['name'] for i in self.get_items_history_list()]
        logger.debug("Reset history for plugin {0} (items: {1})".format(self.plugin_name, reset_list))
        self.stats_history = {}
def init_stats_history(self):
    """Init the stats history (dict of list)."""
    ret = None
    if self.args is not None and self.args.enable_history and self.get_items_history_list() is not None:
        init_list = [i['name'] for i in self.get_items_history_list()]
        logger.debug("Stats history activated for plugin {0} (items: {1})".format(self.plugin_name, init_list))
        ret = {}
    return ret
def load(self, config):
    """Load the server list from the configuration file."""
    server_list = []

    if config is None:
        logger.warning("No configuration file available. Cannot load server list.")
    elif not config.has_section(self._section):
        logger.warning("No [%s] section in the configuration file. Cannot load server list." % self._section)
    else:
        logger.info("Start reading the [%s] section in the configuration file" % self._section)
        for i in range(1, 256):
            new_server = {}
            postfix = 'server_%s_' % str(i)
            # Read the server name (mandatory)
            for s in ['name', 'port', 'alias']:
                new_server[s] = config.get_raw_option(self._section, '%s%s' % (postfix, s))
            if new_server['name'] is not None:
                # Manage optional information
                if new_server['port'] is None:
                    new_server['port'] = '61209'
                new_server['username'] = '******'
                new_server['password'] = ''
                try:
                    new_server['ip'] = gethostbyname(new_server['name'])
                except gaierror as e:
                    logger.error("Cannot get IP address for server %s (%s)" % (new_server['name'], e))
                    continue
                new_server['key'] = new_server['name'] + ':' + new_server['port']
                # Default status is 'UNKNOWN'
                new_server['status'] = 'UNKNOWN'
                # Server type is 'STATIC'
                new_server['type'] = 'STATIC'
                # Add the server to the list
                logger.debug("Add server %s to the static list" % new_server['name'])
                server_list.append(new_server)

    # Server list loaded
    logger.info("%s server(s) loaded from the configuration file" % len(server_list))
    logger.debug("Static server list: %s" % server_list)

    return server_list
def get_docker_cpu(self, container_id, all_stats):
    """Return the container CPU usage.

    Input: id is the full container id
           all_stats is the output of the stats method of the Docker API
    Output: a dict {'total': 1.49}
    """
    cpu_new = {}
    ret = {'total': 0.0}

    # Read the stats
    # For each container, you will find a pseudo-file cpuacct.stat,
    # containing the CPU usage accumulated by the processes of the container.
    # Those times are expressed in ticks of 1/USER_HZ of a second.
    # On x86 systems, USER_HZ is 100.
    try:
        cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
        cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
        cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'])
    except KeyError as e:
        # all_stats does not have CPU information
        logger.debug("Cannot grab CPU usage for container {0} ({1})".format(container_id, e))
    else:
        # Previous CPU stats are stored in the cpu_old variable
        if not hasattr(self, 'cpu_old'):
            # First call, we init the cpu_old variable
            self.cpu_old = {}
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.cpu_old:
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass
        else:
            cpu_delta = float(cpu_new['total'] - self.cpu_old[container_id]['total'])
            system_delta = float(cpu_new['system'] - self.cpu_old[container_id]['system'])
            if cpu_delta > 0.0 and system_delta > 0.0:
                ret['total'] = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100

            # Save stats to compute next stats
            self.cpu_old[container_id] = cpu_new

    # Return the stats
    return ret
def get_docker_network(self, container_id, all_stats):
    """Return the container network usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
    with:
        time_since_update: number of seconds elapsed between the latest grabs
        rx: number of bytes received
        tx: number of bytes transmitted
    """
    # Init the returned dict
    network_new = {}

    # Read the rx/tx stats (in bytes)
    try:
        netcounters = all_stats["network"]
    except KeyError as e:
        # all_stats does not have NETWORK information
        logger.debug("Cannot grab NET usage for container {0} ({1})".format(container_id, e))
        # No fallback available...
        return network_new

    # Previous network interface stats are stored in the netcounters_old variable
    if not hasattr(self, 'netcounters_old'):
        # First call, we init the netcounters_old var
        self.netcounters_old = {}
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.netcounters_old:
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable Rx/s and Tx/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{0}'.format(container_id))
        network_new['rx'] = netcounters["rx_bytes"] - self.netcounters_old[container_id]["rx_bytes"]
        network_new['tx'] = netcounters["tx_bytes"] - self.netcounters_old[container_id]["tx_bytes"]
        network_new['cumulative_rx'] = netcounters["rx_bytes"]
        network_new['cumulative_tx'] = netcounters["tx_bytes"]

    # Save stats to compute next bitrate
    self.netcounters_old[container_id] = netcounters

    # Return the stats
    return network_new
def export(self, input_stats={}):
    """Export all the stats.

    Each export module is run in a dedicated thread.
    """
    # threads = []
    for e in self._exports:
        logger.debug("Export stats using the %s module" % e)
        thread = threading.Thread(target=self._exports[e].update, args=(input_stats,))
        # threads.append(thread)
        thread.start()
def __init__(self):
    """Init batteries stats."""
    try:
        self.bat = batinfo.batteries()
        self.initok = True
        self.bat_list = []
        self.update()
    except Exception as e:
        self.initok = False
        logger.debug("Cannot init GlancesGrabBat class (%s)" % e)
def export(self, input_stats={}):
    """Export all the stats.

    Each export module is run in a dedicated thread.
    """
    # threads = []
    for e in self._exports:
        logger.debug("Export stats using the %s module" % e)
        thread = threading.Thread(target=self._exports[e].update, args=(input_stats,))
        # threads.append(thread)
        thread.start()
def __init__(self, config):
    """Init the monitoring list from the configuration file, if it exists."""
    self.config = config

    if self.config is not None and self.config.has_section('monitor'):
        # Process monitoring list
        logger.debug("Monitor list configuration detected")
        self.__set_monitor_list('monitor', 'list')
    else:
        self.__monitor_list = []
def get_docker_cpu(self, container_id, all_stats):
    """Return the container CPU usage.

    Input: id is the full container id
           all_stats is the output of the stats method of the Docker API
    Output: a dict {'total': 1.49}
    """
    cpu_new = {}
    ret = {'total': 0.0}

    # Read the stats
    # For each container, you will find a pseudo-file cpuacct.stat,
    # containing the CPU usage accumulated by the processes of the container.
    # Those times are expressed in ticks of 1/USER_HZ of a second.
    # On x86 systems, USER_HZ is 100.
    try:
        cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
        cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
        cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'])
    except KeyError as e:
        # all_stats does not have CPU information
        logger.debug("Cannot grab CPU usage for container {0} ({1}). Trying fallback method.".format(container_id, e))
        # Trying fallback to old grab method
        ret = self.get_docker_cpu_old(container_id)

        # Get the user ticks
        ticks = self.get_user_ticks()
        for k in ret.keys():
            ret[k] = float(ret[k]) / ticks
    else:
        # Previous CPU stats are stored in the cpu_old variable
        if not hasattr(self, 'cpu_old'):
            # First call, we init the cpu_old variable
            self.cpu_old = {}
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.cpu_old:
            try:
                self.cpu_old[container_id] = cpu_new
            except (IOError, UnboundLocalError):
                pass
        else:
            cpu_delta = float(cpu_new['total'] - self.cpu_old[container_id]['total'])
            system_delta = float(cpu_new['system'] - self.cpu_old[container_id]['system'])
            if cpu_delta > 0.0 and system_delta > 0.0:
                ret['total'] = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100

            # Save stats to compute next stats
            self.cpu_old[container_id] = cpu_new

    # Return the stats
    return ret
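# A hedged worked example of the percentage computed above, with made-up
# figures rather than real Docker API output: both deltas are nanoseconds of
# CPU time accumulated between two consecutive grabs.
cpu_delta = 2.0e8      # container CPU time consumed since the previous grab
system_delta = 4.0e9   # host CPU time consumed over the same interval (all cores)
nb_core = 4
total = (cpu_delta / system_delta) * float(nb_core) * 100
# total == 20.0, i.e. the container used the equivalent of 20% of one core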
def init_stats_history(self):
    """Init the stats history (dict of list)."""
    ret = None
    if self.args is not None and self.args.enable_history and self.get_items_history_list() is not None:
        init_list = [i['name'] for i in self.get_items_history_list()]
        logger.debug("Stats history activated for plugin {0} (items: {1})".format(self.plugin_name, init_list))
        ret = {}
    return ret
def get_export(self):
    """Overwrite the default export method.

    - Only exports containers
    - The key is the first container name
    """
    ret = []
    try:
        ret = self.stats['containers']
    except KeyError as e:
        logger.debug("Docker export error {0}".format(e))
    return ret
def export(self, name, columns, points):
    """Export the stats to the OpenTSDB server."""
    for i in range(0, len(columns)):
        if not isinstance(points[i], Number):
            continue
        stat_name = '{0}.{1}.{2}'.format(self.prefix, name, columns[i])
        stat_value = points[i]
        try:
            self.client.send(stat_name, stat_value, **self.tags)
        except Exception as e:
            logger.error("Cannot export stats %s to OpenTSDB (%s)" % (name, e))
    logger.debug("Export {0} stats to OpenTSDB".format(name))
def export(self, name, columns, points):
    """Export the stats to the Statsd server."""
    for i in range(0, len(columns)):
        if not isinstance(points[i], Number):
            continue
        stat_name = '{0}.{1}'.format(name, columns[i])
        stat_value = points[i]
        try:
            self.client.gauge(stat_name, stat_value)
        except Exception as e:
            logger.error("Can not export stats to Statsd (%s)" % e)
    logger.debug("Export {0} stats to Statsd".format(name))
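# A small illustration of the metric names produced above (the plugin name and
# values are examples, not real output; non-numeric points are skipped by the
# isinstance() test). Assuming:
#
#   name    = 'cpu'
#   columns = ['user', 'system']
#   points  = [12.3, 4.5]
#
# the loop would send the gauges:
#   cpu.user   = 12.3
#   cpu.system = 4.5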
def remove_server(self, name):
    """Remove a server from the dict."""
    for i in self._server_list:
        if i['key'] == name:
            try:
                self._server_list.remove(i)
                logger.debug("Remove server %s from the list" % name)
                logger.debug("Updated servers list (%s servers): %s" % (len(self._server_list), self._server_list))
            except ValueError:
                logger.error("Cannot remove server %s from the list" % name)
def remove_server(self, name):
    """Remove a server from the dict."""
    for i in self._server_list:
        if i['key'] == name:
            try:
                self._server_list.remove(i)
                logger.debug("Remove server %s from the list" % name)
                logger.debug("Updated servers list (%s servers): %s" % (len(self._server_list), self._server_list))
            except ValueError:
                logger.error("Cannot remove server %s from the list" % name)
def load_limits(self, config):
    """Load limits from the configuration file, if it exists."""
    if (hasattr(config, 'has_section') and
            config.has_section(self.plugin_name)):
        for level, _ in config.items(self.plugin_name):
            # Read limits
            limit = '_'.join([self.plugin_name, level])
            try:
                self._limits[limit] = config.get_float_value(self.plugin_name, level)
            except ValueError:
                self._limits[limit] = config.get_value(self.plugin_name, level).split(",")
            logger.debug("Load limit: {0} = {1}".format(limit, self._limits[limit]))
def get(self, sensor_type='temperature_core'):
    """Get sensors list."""
    self.__update__()
    if sensor_type == 'temperature_core':
        ret = [s for s in self.sensors_list if s['unit'] == SENSOR_TEMP_UNIT]
    elif sensor_type == 'fan_speed':
        ret = [s for s in self.sensors_list if s['unit'] == SENSOR_FAN_UNIT]
    else:
        # Unknown type
        logger.debug("Unknown sensor type %s" % sensor_type)
        ret = []
    return ret
def __init__(self, docker_client, container_id):
    """Init the class.

    docker_client: instance of Docker-py client
    container_id: Id of the container
    """
    logger.debug("docker plugin - Create thread for container {0}".format(container_id[:12]))
    super(ThreadDockerGrabber, self).__init__()
    # Event needed to properly stop the thread
    self._stopper = threading.Event()
    # docker-py returns the stats as a stream
    self._container_id = container_id
    self._stats_stream = docker_client.stats(container_id, decode=True)
    # The class returns the stats as a dict
    self._stats = {}
def process_filter(self, value):
    """Set the process filter."""
    logger.info("Set process filter to {0}".format(value))
    self._process_filter = value
    if value is not None:
        try:
            self._process_filter_re = re.compile(value)
            logger.debug("Process filter regex compilation OK: {0}".format(self.process_filter))
        except Exception:
            logger.error("Cannot compile process filter regex: {0}".format(value))
            self._process_filter_re = None
    else:
        self._process_filter_re = None
def wrapper(*args, **kw):
    ret = fct(*args, **kw)
    if is_py3:
        logger.debug("%s %s %s return %s" % (
            args[0].__class__.__name__,
            args[0].__class__.__module__[len('glances_'):],
            fct.__name__,
            ret))
    else:
        logger.debug("%s %s %s return %s" % (
            args[0].__class__.__name__,
            args[0].__class__.__module__[len('glances_'):],
            fct.func_name,
            ret))
    return ret
def wrapper(*args, **kw):
    ret = fct(*args, **kw)
    if is_py3:
        logger.debug("%s %s %s return %s" % (
            args[0].__class__.__name__,
            args[0].__class__.__module__[len('glances_'):],
            fct.__name__,
            ret))
    else:
        logger.debug("%s %s %s return %s" % (
            args[0].__class__.__name__,
            args[0].__class__.__module__[len('glances_'):],
            fct.func_name,
            ret))
    return ret
def __init__(self, config=None, args=None):
    """Init the export class."""
    # Export name (= module name without glances_)
    self.export_name = self.__class__.__module__[len('glances_'):]
    logger.debug("Init export interface %s" % self.export_name)

    # Init the config & args
    self.config = config
    self.args = args

    # By default, export is disabled
    # Has to be set to True in the __init__ of the child class
    self.export_enable = False
def get_docker_network(self, container_id, all_stats):
    """Return the container network usage using the Docker API (v1.0 or higher).

    Input: id is the full container id
    Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
    with:
        time_since_update: number of seconds elapsed between the latest grabs
        rx: number of bytes received
        tx: number of bytes transmitted
    """
    # Init the returned dict
    network_new = {}

    # Read the rx/tx stats (in bytes)
    try:
        netcounters = all_stats["network"]
    except KeyError as e:
        # all_stats does not have NETWORK information
        logger.debug("Cannot grab NET usage for container {0} ({1})".format(container_id, e))
        # No fallback available...
        return network_new

    # Previous network interface stats are stored in the netcounters_old variable
    if not hasattr(self, 'netcounters_old'):
        # First call, we init the netcounters_old var
        self.netcounters_old = {}
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass

    if container_id not in self.netcounters_old:
        try:
            self.netcounters_old[container_id] = netcounters
        except (IOError, UnboundLocalError):
            pass
    else:
        # By storing time data we enable Rx/s and Tx/s calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
        network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{0}'.format(container_id))
        network_new['rx'] = netcounters["rx_bytes"] - self.netcounters_old[container_id]["rx_bytes"]
        network_new['tx'] = netcounters["tx_bytes"] - self.netcounters_old[container_id]["tx_bytes"]
        network_new['cumulative_rx'] = netcounters["rx_bytes"]
        network_new['cumulative_tx'] = netcounters["tx_bytes"]

    # Save stats to compute next bitrate
    self.netcounters_old[container_id] = netcounters

    # Return the stats
    return network_new
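# A hedged sketch of how a consumer of the returned dict could turn the byte
# counters into rates; the field names follow the dict built above and the
# sample values are invented.
stats = {'time_since_update': 3.0, 'rx': 10, 'tx': 65}
rx_per_second = stats['rx'] / stats['time_since_update']  # ~3.3 bytes/s
tx_per_second = stats['tx'] / stats['time_since_update']  # ~21.7 bytes/s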
def get_key(self, window):
    # Catch ESC key AND numlock key (issue #163)
    keycode = [0, 0]
    keycode[0] = window.getch()
    keycode[1] = window.getch()

    if keycode != [-1, -1]:
        logger.debug("Keypressed (code: %s)" % keycode)

    if keycode[0] == 27 and keycode[1] != -1:
        # Do not escape on special keys
        return -1
    else:
        return keycode[0]
def add_server(self, name, ip, port):
    """Add a new server to the list."""
    new_server = {'key': name,                 # Zeroconf name with both hostname and port
                  'name': name.split(':')[0],  # Short name
                  'ip': ip,                    # IP address seen by the client
                  'port': port,                # TCP port
                  'username': '******',        # Default username
                  'password': '',              # Default password
                  'status': 'UNKNOWN',         # Server status: 'UNKNOWN', 'OFFLINE', 'ONLINE', 'PROTECTED'
                  'type': 'DYNAMIC'}           # Server type: 'STATIC' or 'DYNAMIC'
    self._server_list.append(new_server)
    logger.debug("Updated servers list (%s servers): %s" % (len(self._server_list), self._server_list))
def set_plugins(self, input_plugins):
    """Set the plugin list according to the Glances server."""
    header = "glances_"
    for item in input_plugins:
        # Import the plugin
        plugin = __import__(header + item)
        # Add the plugin to the dictionary
        # The key is the plugin name
        # for example, the file glances_xxx.py
        # generates self._plugins["xxx"] = ...
        logger.debug("Server uses {0} plugin".format(item))
        self._plugins[item] = plugin.Plugin()
    # Restoring system path
    sys.path = sys_path
def export(self, name, columns, points):
    """Write the points in RabbitMQ."""
    data = ('hostname=' + self.hostname + ', name=' + name +
            ', dateinfo=' + datetime.datetime.utcnow().isoformat())
    for i in range(0, len(columns)):
        if not isinstance(points[i], Number):
            continue
        else:
            data += ", " + columns[i] + "=" + str(points[i])
    logger.debug(data)
    try:
        self.client.basic_publish(exchange='', routing_key=self.rabbitmq_queue, body=data)
    except Exception as e:
        logger.error("Can not export stats to RabbitMQ (%s)" % e)
def add_server(self, name, ip, port):
    """Add a new server to the list."""
    new_server = {
        'key': name,                 # Zeroconf name with both hostname and port
        'name': name.split(':')[0],  # Short name
        'ip': ip,                    # IP address seen by the client
        'port': port,                # TCP port
        'username': '******',        # Default username
        'password': '',              # Default password
        'status': 'UNKNOWN',         # Server status: 'UNKNOWN', 'OFFLINE', 'ONLINE', 'PROTECTED'
        'type': 'DYNAMIC'}           # Server type: 'STATIC' or 'DYNAMIC'
    self._server_list.append(new_server)
    logger.debug("Updated servers list (%s servers): %s" % (len(self._server_list), self._server_list))
def connect(self, version=None):
    """Connect to the Docker server."""
    # Init connection to the Docker API
    try:
        if version is None:
            ret = docker.Client(base_url='unix://var/run/docker.sock')
        else:
            ret = docker.Client(base_url='unix://var/run/docker.sock', version=version)
    except NameError:
        # docker lib not found
        return None

    try:
        ret.version()
    except requests.exceptions.ConnectionError as e:
        # Connection error (Docker not detected)
        # Let this message in debug mode
        logger.debug("Can't connect to the Docker server (%s)" % e)
        return None
    except docker.errors.APIError as e:
        if version is None:
            # API error (Version mismatch ?)
            logger.debug("Docker API error (%s)" % e)
            # Try the connection with the server version
            import re
            version = re.search(r'server\:\ (.*)\)\".*\)', str(e))
            if version:
                logger.debug("Try connection with Docker API version %s" % version.group(1))
                ret = self.connect(version=version.group(1))
            else:
                logger.debug("Cannot retrieve the Docker server version")
                ret = None
        else:
            # API error
            logger.error("Docker API error (%s)" % e)
            ret = None
    except Exception as e:
        # Others exceptions...
        # Connection error (Docker not detected)
        logger.error("Can't connect to the Docker server (%s)" % e)
        ret = None

    # Log an info if the Docker plugin is disabled
    if ret is None:
        logger.debug("Docker plugin is disabled because an error has been detected")

    return ret
def fetch(self):
    """Fetch the data from the hddtemp daemon."""
    # Taking care of sudden deaths/stops of the hddtemp daemon
    try:
        sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sck.connect((self.host, self.port))
        data = sck.recv(4096)
        sck.close()
    except socket.error as e:
        logger.warning("Can not connect to an HDDtemp server ({0}:{1} => {2})".format(self.host, self.port, e))
        logger.debug("Disable the HDDtemp module. Use --disable-hddtemp to hide the previous message.")
        self.args.disable_hddtemp = True
        data = ""

    return data
def process_filter(self, value):
    """Set the process filter."""
    logger.info("Set process filter to {0}".format(value))
    self._process_filter = value
    if value is not None:
        try:
            self._process_filter_re = re.compile(value)
            logger.debug("Process filter regex compilation OK: {0}".format(self.process_filter))
        except Exception:
            logger.error("Cannot compile process filter regex: {0}".format(value))
            self._process_filter_re = None
    else:
        self._process_filter_re = None
def load_limits(self, config):
    """Load limits from the configuration file, if it exists."""
    if (hasattr(config, 'has_section') and
            config.has_section(self.plugin_name)):
        for level, _ in config.items(self.plugin_name):
            # Read limits
            limit = '_'.join([self.plugin_name, level])
            try:
                self._limits[limit] = config.get_float_value(self.plugin_name, level)
            except ValueError:
                self._limits[limit] = config.get_value(self.plugin_name, level).split(",")
            logger.debug("Load limit: {0} = {1}".format(limit, self._limits[limit]))
def load_limits(self, config):
    """Load the limits from the configuration file."""
    if (hasattr(config, 'has_section') and
            config.has_section(self.plugin_name)):
        for s, v in config.items(self.plugin_name):
            # Read limits
            try:
                self.limits[self.plugin_name + '_' + s] = config.get_option(self.plugin_name, s)
            except ValueError:
                self.limits[self.plugin_name + '_' + s] = config.get_raw_option(self.plugin_name, s).split(",")
            logger.debug("Load limit: {0} = {1}".format(
                self.plugin_name + '_' + s, self.limits[self.plugin_name + '_' + s]))
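# A hedged illustration of the key naming performed by load_limits(): assuming
# a configuration section such as the one sketched below for the 'cpu' plugin
# (option names and thresholds are examples, not necessarily the shipped
# defaults),
#
#   [cpu]
#   user_careful=50
#   user_critical=90
#
# the limits end up prefixed with the plugin name:
#   self.limits['cpu_user_careful']    # numeric value returned by get_option()
#   self.limits['cpu_user_critical']
# while a comma-separated value would be stored as a Python list via split(",").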