def update(self, stats): """Update stats in the CSV output file.""" # Get the stats all_stats = stats.getAllExportsAsDict(plugin_list=self.plugins_to_export()) # Init data with timestamp (issue#708) if self.first_line: csv_header = ['timestamp'] csv_data = [time.strftime('%Y-%m-%d %H:%M:%S')] # Loop over plugins to export for plugin in self.plugins_to_export(): if isinstance(all_stats[plugin], list): for stat in all_stats[plugin]: # First line: header if self.first_line: csv_header += ('{}_{}_{}'.format( plugin, self.get_item_key(stat), item) for item in stat) # Others lines: stats csv_data += itervalues(stat) elif isinstance(all_stats[plugin], dict): # First line: header if self.first_line: fieldnames = iterkeys(all_stats[plugin]) csv_header += ('{}_{}'.format(plugin, fieldname) for fieldname in fieldnames) # Others lines: stats csv_data += itervalues(all_stats[plugin]) # Export to CSV if self.first_line: self.writer.writerow(csv_header) self.first_line = False self.writer.writerow(csv_data) self.csv_file.flush()
def update(self, stats): """Update stats in the CSV output file.""" # Get the stats all_stats = stats.getAllExports() plugins = stats.getAllPlugins() # Init data with timestamp (issue#708) csv_header = ["timestamp"] csv_data = [time.strftime("%Y-%m-%d %H:%M:%S")] # Loop over available plugin for i, plugin in enumerate(plugins): if plugin in self.plugins_to_export(): if isinstance(all_stats[i], list): for stat in all_stats[i]: # First line: header if self.first_line: csv_header += ("{0}_{1}_{2}".format(plugin, self.get_item_key(stat), item) for item in stat) # Others lines: stats csv_data += itervalues(stat) elif isinstance(all_stats[i], dict): # First line: header if self.first_line: fieldnames = iterkeys(all_stats[i]) csv_header += ("{0}_{1}".format(plugin, fieldname) for fieldname in fieldnames) # Others lines: stats csv_data += itervalues(all_stats[i]) # Export to CSV if self.first_line: self.writer.writerow(csv_header) self.first_line = False self.writer.writerow(csv_data) self.csv_file.flush()
def get_stats_snmp(self, bulk=False, snmp_oid=None):
    """Update stats using SNMP.

    If bulk=True, use a bulk request instead of a get request.
    """
    snmp_oid = snmp_oid or {}

    from glances.snmp import GlancesSNMPClient

    # Init the SNMP request
    clientsnmp = GlancesSNMPClient(host=self.args.client,
                                   port=self.args.snmp_port,
                                   version=self.args.snmp_version,
                                   community=self.args.snmp_community)

    # Process the SNMP request
    ret = {}
    if bulk:
        # Bulk request
        snmpresult = clientsnmp.getbulk_by_oid(0, 10, *itervalues(snmp_oid))

        if len(snmp_oid) == 1:
            # Bulk command for only one OID
            # Note: the key is the item index appended to the OID result
            oid_name = next(iterkeys(snmp_oid))
            oid_prefix = next(itervalues(snmp_oid))
            for item in snmpresult:
                item_oid = next(iterkeys(item))
                if item_oid.startswith(oid_prefix):
                    ret[oid_name + item_oid.split(oid_prefix)[1]] = next(itervalues(item))
        else:
            # Build the internal dict with the SNMP result
            # Note: key is the first item in the snmp_oid
            index = 1
            for item in snmpresult:
                item_stats = {}
                item_key = None
                for key in iterkeys(snmp_oid):
                    oid = snmp_oid[key] + '.' + str(index)
                    if oid in item:
                        if item_key is None:
                            item_key = item[oid]
                        else:
                            item_stats[key] = item[oid]
                if item_stats:
                    ret[item_key] = item_stats
                index += 1
    else:
        # Simple get request
        snmpresult = clientsnmp.get_by_oid(*itervalues(snmp_oid))

        # Build the internal dict with the SNMP result
        for key in iterkeys(snmp_oid):
            ret[key] = snmpresult[snmp_oid[key]]

    return ret
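# Illustrative usage sketch (not from the source above): how a plugin exposing
# get_stats_snmp() might describe its stats as a name -> OID dict. The OIDs
# below are the standard UCD-SNMP-MIB load-average OIDs, used here purely as
# an example; the function name is hypothetical.

def example_snmp_load(plugin):
    # name -> OID mapping; a simple get request returns one value per name
    snmp_oid = {'min1': '1.3.6.1.4.1.2021.10.1.3.1',
                'min5': '1.3.6.1.4.1.2021.10.1.3.2',
                'min15': '1.3.6.1.4.1.2021.10.1.3.3'}
    # Expected shape of the result: {'min1': '0.64', 'min5': '0.52', ...}
    return plugin.get_stats_snmp(snmp_oid=snmp_oid)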
def update(self, stats): """Update stats in the CSV output file.""" # Get the stats all_stats = stats.getAllExportsAsDict( plugin_list=self.plugins_to_export()) # Init data with timestamp (issue#708) if self.first_line: csv_header = ['timestamp'] csv_data = [time.strftime('%Y-%m-%d %H:%M:%S')] # Loop over plugins to export for plugin in self.plugins_to_export(): if isinstance(all_stats[plugin], list): for stat in all_stats[plugin]: # First line: header if self.first_line: csv_header += ('{}_{}_{}'.format( plugin, self.get_item_key(stat), item) for item in stat) # Others lines: stats csv_data += itervalues(stat) elif isinstance(all_stats[plugin], dict): # First line: header if self.first_line: fieldnames = iterkeys(all_stats[plugin]) csv_header += ('{}_{}'.format(plugin, fieldname) for fieldname in fieldnames) # Others lines: stats csv_data += itervalues(all_stats[plugin]) # Export to CSV # Manage header if self.first_line: if self.old_header is None: # New file, write the header on top on the CSV file self.writer.writerow(csv_header) # File already exist, check if header are compatible if self.old_header != csv_header: # Header are differents, log an error and do not write data logger.error( "Cannot append data to existing CSV file. Headers are differents." ) logger.debug("Old header: {}".format(self.old_header)) logger.debug("New header: {}".format(csv_header)) else: # Header are equals, ready to write data self.old_header = None # Only do this once self.first_line = False # Manage data if self.old_header is None: self.writer.writerow(csv_data) self.csv_file.flush()
def update(self): """Update the processes stats.""" # Reset the stats self.processlist = [] self.processcount = { 'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0 } # Do not process if disable tag is set if self.disable_tag: return # Get the time since last update time_since_update = getTimeSinceLastUpdate('process_disk') # Build an internal dict with only mandatories stats (sort keys) processdict = {} excluded_processes = set() for proc in psutil.process_iter(): # Ignore kernel threads if needed if self.no_kernel_threads and not WINDOWS and is_kernel_thread( proc): continue # If self.max_processes is None: Only retreive mandatory stats # Else: retreive mandatory and standard stats s = self.__get_process_stats( proc, mandatory_stats=True, standard_stats=self.max_processes is None) # Continue to the next process if it has to be filtered if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])): excluded_processes.add(proc) continue # Ok add the process to the list processdict[proc] = s # ignore the 'idle' process on Windows and *BSD # ignore the 'kernel_task' process on OS X # waiting for upstream patch from psutil if (BSD and processdict[proc]['name'] == 'idle' or WINDOWS and processdict[proc]['name'] == 'System Idle Process' or OSX and processdict[proc]['name'] == 'kernel_task'): continue # Update processcount (global statistics) try: self.processcount[str(proc.status())] += 1 except KeyError: # Key did not exist, create it try: self.processcount[str(proc.status())] = 1 except psutil.NoSuchProcess: pass except psutil.NoSuchProcess: pass else: self.processcount['total'] += 1 # Update thread number (global statistics) try: self.processcount['thread'] += proc.num_threads() except Exception: pass if self._enable_tree: self.process_tree = ProcessTreeNode.build_tree( processdict, self.sort_key, self.sort_reverse, self.no_kernel_threads, excluded_processes) for i, node in enumerate(self.process_tree): # Only retreive stats for visible processes (max_processes) if self.max_processes is not None and i >= self.max_processes: break # add standard stats new_stats = self.__get_process_stats(node.process, mandatory_stats=False, standard_stats=True, extended_stats=False) if new_stats is not None: node.stats.update(new_stats) # Add a specific time_since_update stats for bitrate node.stats['time_since_update'] = time_since_update else: # Process optimization # Only retreive stats for visible processes (max_processes) if self.max_processes is not None: # Sort the internal dict and cut the top N (Return a list of tuple) # tuple=key (proc), dict (returned by __get_process_stats) try: processiter = sorted(iteritems(processdict), key=lambda x: x[1][self.sort_key], reverse=self.sort_reverse) except (KeyError, TypeError) as e: logger.error("Cannot sort process list by {0}: {1}".format( self.sort_key, e)) logger.error('{0}'.format(listitems(processdict)[0])) # Fallback to all process (issue #423) processloop = iteritems(processdict) first = False else: processloop = processiter[0:self.max_processes] first = True else: # Get all processes stats processloop = iteritems(processdict) first = False for i in processloop: # Already existing mandatory stats procstat = i[1] if self.max_processes is not None: # Update with standard stats # and extended stats but only for TOP (first) process s = self.__get_process_stats(i[0], mandatory_stats=False, standard_stats=True, extended_stats=first) if s is None: continue procstat.update(s) # Add a specific time_since_update stats for bitrate 
procstat['time_since_update'] = time_since_update # Update process list self.processlist.append(procstat) # Next... first = False # Build the all processes list used by the monitored list self.allprocesslist = itervalues(processdict) # Clean internals caches if timeout is reached if self.cache_timer.finished(): self.username_cache = {} self.cmdline_cache = {} # Restart the timer self.cache_timer.reset()
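# A standalone sketch (not from the source) of the sort-and-cut logic used in
# update() above: sort the (process, stats) pairs by one stats key, keep only
# the top N, and fall back to the unsorted items if the key is missing. The
# function name and the plain-dict inputs are hypothetical.

def example_top_n(processdict, sort_key='cpu_percent', max_processes=3):
    try:
        ordered = sorted(processdict.items(),
                         key=lambda x: x[1][sort_key],
                         reverse=True)
    except (KeyError, TypeError):
        # Fallback: keep everything, unsorted (mirrors issue #423 handling)
        return list(processdict.items())
    return ordered[:max_processes]

# Example with hypothetical stats dicts keyed by PID instead of psutil.Process:
# example_top_n({1: {'cpu_percent': 5.0}, 2: {'cpu_percent': 42.0}},
#               max_processes=1) -> [(2, {'cpu_percent': 42.0})]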
def __display_top(self, stat_display, stats):
    """Display the second line in the Curses interface.

    <QUICKLOOK> + CPU|PERCPU + <GPU> + MEM + SWAP + LOAD
    """
    self.init_column()
    self.new_line()

    # Init quicklook
    stat_display['quicklook'] = {'msgdict': []}

    # Dict for plugins width
    plugin_widths = {}
    for p in self._top:
        plugin_widths[p] = self.get_stats_display_width(stat_display.get(p, 0)) if hasattr(self.args, 'disable_' + p) else 0

    # Width of all plugins
    stats_width = sum(itervalues(plugin_widths))

    # Number of plugins, not counting quicklook
    stats_number = sum([int(stat_display[p]['msgdict'] != [])
                        for p in self._top
                        if not getattr(self.args, 'disable_' + p)])

    if not self.args.disable_quicklook:
        # Quick look is in the place!
        if self.args.full_quicklook:
            quicklook_width = self.screen.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column)
        else:
            quicklook_width = min(self.screen.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column),
                                  self._quicklook_max_width - 5)
        try:
            stat_display["quicklook"] = stats.get_plugin(
                'quicklook').get_stats_display(max_width=quicklook_width, args=self.args)
        except AttributeError as e:
            logger.debug("Quicklook plugin not available (%s)" % e)
        else:
            plugin_widths['quicklook'] = self.get_stats_display_width(stat_display["quicklook"])
            stats_width = sum(itervalues(plugin_widths)) + 1
        self.space_between_column = 1
        self.display_plugin(stat_display["quicklook"])
        self.new_column()

    # Compute spaces between plugins
    # Note: only one space between quicklook and the others
    plugin_display_optional = {}
    for p in self._top:
        plugin_display_optional[p] = True
    if stats_number > 1:
        self.space_between_column = max(1, int((self.screen.getmaxyx()[1] - stats_width) / (stats_number - 1)))
        for p in ['mem', 'cpu']:
            # No space? Remove optional stats
            if self.space_between_column < 3:
                plugin_display_optional[p] = False
                plugin_widths[p] = self.get_stats_display_width(stat_display[p], without_option=True) if hasattr(self.args, 'disable_' + p) else 0
                stats_width = sum(itervalues(plugin_widths)) + 1
                self.space_between_column = max(1, int((self.screen.getmaxyx()[1] - stats_width) / (stats_number - 1)))
    else:
        self.space_between_column = 0

    # Display CPU, MEM, SWAP and LOAD
    for p in self._top:
        if p == 'quicklook':
            continue
        if p in stat_display:
            self.display_plugin(stat_display[p],
                                display_optional=plugin_display_optional[p])
        if p != 'load':
            # Skip the column after the last plugin
            self.new_column()

    # Space between column
    self.space_between_column = 3

    # Backup line position
    self.saved_line = self.next_line
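# A small sketch (not from the source) of the quicklook width computation in
# __display_top() above: the quicklook bar gets whatever terminal width is
# left after the other plugins, their separators, and an 8-column margin,
# optionally capped by a maximum width. The function name is hypothetical.

def example_quicklook_width(term_width, stats_width, stats_number,
                            space_between_column=3, max_width=None):
    width = term_width - (stats_width + 8 + stats_number * space_between_column)
    if max_width is not None:
        # Non-full mode: cap the bar (the code above uses self._quicklook_max_width - 5)
        width = min(width, max_width)
    return width

# e.g. on a 120-column terminal with 60 columns of plugins in 4 blocks:
# example_quicklook_width(120, 60, 4) -> 40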
def exit(self): """Overwrite the exit method to close threads""" for t in itervalues(self.thread_list): t.stop() # Call the father class super(Plugin, self).exit()
def exit(self): """Overwrite the exit method to close threads""" logger.debug("Stop the Docker plugin") for t in itervalues(self.thread_list): t.stop()
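# Both exit() methods above assume each entry in self.thread_list exposes a
# stop() method. A minimal sketch of that pattern (a hypothetical class, not
# the plugins' actual thread implementation): a worker thread that polls until
# an internal event is set.

import threading

class StoppableThread(threading.Thread):
    def __init__(self):
        super(StoppableThread, self).__init__()
        self._stopper = threading.Event()

    def stop(self):
        # Ask the run() loop to terminate
        self._stopper.set()

    def run(self):
        while not self._stopper.is_set():
            # ... refresh stats here ...
            self._stopper.wait(1)  # poll period in seconds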