def export(self, name, columns, points):
    """Write the points to the Prometheus exporter using Gauge.

    name: Glances plugin name (used as part of the metric name).
    columns / points: parallel lists of stat names and values.
    """
    logger.debug("Export {} stats to Prometheus exporter".format(name))

    # Remove non number stats and convert all to float (for Boolean)
    data = {k: float(v) for (k, v) in iteritems(dict(zip(columns, points))) if isinstance(v, Number)}

    # Get the labels once: they do not depend on the metric, so there is
    # no need to re-parse them on every loop iteration (loop-invariant hoist)
    labels = self.parse_tags(self.labels)

    # Write metrics to the Prometheus exporter
    for k, v in iteritems(data):
        # Prometheus metric name: prefix_<glances stats name>
        metric_name = self.prefix + self.METRIC_SEPARATOR + name + self.METRIC_SEPARATOR + k
        # Prometheus is very sensitive to the metric name
        # See: https://prometheus.io/docs/practices/naming/
        for c in ['.', '-', '/', ' ']:
            metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
        # Manage an internal dict between metric name and Gauge
        if metric_name not in self._metric_dict:
            self._metric_dict[metric_name] = Gauge(metric_name, k, labelnames=listkeys(labels))
        # Write the value
        if hasattr(self._metric_dict[metric_name], 'labels'):
            # Add the labels (see issue #1255)
            self._metric_dict[metric_name].labels(**labels).set(v)
        else:
            self._metric_dict[metric_name].set(v)
def update(self, process_list):
    """Update the AMP by parsing the output of the 'service' command.

    process_list is unused here: the stats come from the service command
    output, not from the process list (kept for AMP interface compatibility).
    """
    # Get the systemctl status
    logger.debug('{}: Update stats using service {}'.format(
        self.NAME, self.get('service_cmd')))
    try:
        res = check_output(self.get('service_cmd').split(),
                           stderr=STDOUT).decode('utf-8')
    except OSError as e:
        logger.debug('{}: Error while executing service ({})'.format(
            self.NAME, e))
    else:
        status = {'running': 0, 'stopped': 0, 'upstart': 0}
        # For each line
        for line in res.split('\n'):
            # Split per space .*
            # 'fields' instead of the ambiguous single-letter 'l' (E741)
            fields = line.split()
            if len(fields) < 4:
                continue
            # fields[1] is the status flag: '+' running, '-' stopped, '?' upstart
            if fields[1] == '+':
                status['running'] += 1
            elif fields[1] == '-':
                status['stopped'] += 1
            elif fields[1] == '?':
                status['upstart'] += 1
        # Build the output (string) message
        output = 'Services\n'
        for k, v in iteritems(status):
            output += '{}: {}\n'.format(k, v)
        self.set_result(output, separator=' ')

    return self.result()
def build_sensors_list(self, type):
    """Build the sensors list depending of the type.

    type: SENSOR_TEMP_UNIT or SENSOR_FAN_UNIT
    output: a list
    """
    ret = []

    # Select the previously grabbed stats and refresh the cache for the
    # next call (temperature or fan, depending on the requested type)
    if type == SENSOR_TEMP_UNIT and self.init_temp:
        input_list = self.stemps
        self.stemps = psutil.sensors_temperatures()
    elif type == SENSOR_FAN_UNIT and self.init_fan:
        input_list = self.sfans
        self.sfans = psutil.sensors_fans()
    else:
        return ret

    for chip_name, features in iteritems(input_list):
        for index, feature in enumerate(features, start=1):
            sensor = {}
            # Sensor name: fall back to "<chip> <index>" when unlabeled
            if feature.label == '':
                sensor['label'] = chip_name + ' ' + str(index)
            else:
                sensor['label'] = feature.label
            # Fan speed and unit
            sensor['value'] = int(feature.current)
            sensor['unit'] = type
            # Add sensor to the list
            ret.append(sensor)

    return ret
def export(self, title, data):
    """Generate graph from the data.

    data maps a serie name to a list of (datetime, value) points, e.g. for
    the mem plugin: {'percent': [(datetime(...), 51.8), ...]}

    Return:
    * True if the graph have been generated
    * False if the graph have not been generated
    """
    if data == {}:
        # Nothing to draw
        return False

    x_formatter = lambda dt: dt.strftime('%Y/%m/%d %H:%M:%S')
    chart = DateTimeLine(title=title.capitalize(),
                         width=self.width,
                         height=self.height,
                         style=self.style,
                         show_dots=False,
                         legend_at_bottom=True,
                         x_label_rotation=20,
                         x_value_formatter=x_formatter)

    # Subsample the series so they fit the graph width
    subsampled = time_serie_subsample(data, self.width)
    for serie_name, serie_points in iteritems(subsampled):
        chart.add(serie_name, serie_points)

    chart.render_to_file(os.path.join(self.path, title + '.svg'))
    return True
def update(self):
    """Update load stats."""
    # Reset stats
    self.reset()

    if self.input_method == 'local':
        # Update stats using the standard system lib
        # Get the load using the os standard lib
        try:
            min1, min5, min15 = os.getloadavg()
        except (OSError, AttributeError):
            # getloadavg is not available on every platform
            self.stats = {}
        else:
            self.stats = {'min1': min1,
                          'min5': min5,
                          'min15': min15,
                          'cpucore': self.nb_log_core}
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        self.stats = self.get_stats_snmp(snmp_oid=snmp_oid)

        if self.stats['min1'] == '':
            self.reset()
            return self.stats

        # Python 3 return a dict like:
        # {'min1': "b'0.08'", 'min5': "b'0.12'", 'min15': "b'0.15'"}
        for key, value in iteritems(self.stats):
            self.stats[key] = float(value)

        self.stats['cpucore'] = self.nb_log_core

    return self.stats
def export(self, title, data):
    """Generate graph from the data.

    data maps a serie name to a list of (datetime, value) points, e.g. for
    the mem plugin: {'percent': [(datetime(...), 51.8), ...]}

    Return:
    * True if the graph have been generated
    * False if the graph have not been generated
    """
    if data == {}:
        # No history available: nothing to render
        return False

    chart = DateTimeLine(
        title=title.capitalize(),
        width=self.width,
        height=self.height,
        style=self.style,
        show_dots=False,
        legend_at_bottom=True,
        x_label_rotation=20,
        x_value_formatter=lambda dt: dt.strftime('%Y/%m/%d %H:%M:%S'))

    # Reduce each serie to at most self.width points before plotting
    for name, points in iteritems(time_serie_subsample(data, self.width)):
        chart.add(name, points)

    # Write the SVG file next to the other graphs
    chart.render_to_file(os.path.join(self.path, title + '.svg'))
    return True
def update(self):
    """Update the AMP list."""
    # Init new stats
    stats = self.get_init_value()

    if self.input_method == 'local':
        # One dict per AMP, exposing its current state and configuration
        for amp_key, amp in iteritems(self.glances_amps.update()):
            amp_stats = {'key': amp_key,
                         'name': amp.NAME,
                         'result': amp.result(),
                         'refresh': amp.refresh(),
                         'timer': amp.time_until_refresh(),
                         'count': amp.count(),
                         'countmin': amp.count_min(),
                         'countmax': amp.count_max(),
                         'regex': amp.regex() is not None}
            stats.append(amp_stats)
    else:
        # Not available in SNMP mode
        pass

    # Update the stats
    self.stats = stats

    return self.stats
def update(self, process_list):
    """Update the AMP"""
    # Get the systemctl status
    logger.debug('{}: Update stats using systemctl {}'.format(
        self.NAME, self.get('systemctl_cmd')))
    try:
        res = check_output(self.get('systemctl_cmd').split())
    except (OSError, CalledProcessError) as e:
        logger.debug('{}: Error while executing systemctl ({})'.format(
            self.NAME, e))
    else:
        status = {}
        # For each line (skip the header and the trailing summary lines)
        for line in to_ascii(res).split('\n')[1:-8]:
            fields = line.split()
            if len(fields) <= 3:
                continue
            # Count the values of the "load" and "active" columns
            for col in (1, 2):
                status[fields[col]] = status.get(fields[col], 0) + 1
        # Build the output (string) message
        output = 'Services\n'
        for k, v in iteritems(status):
            output += '{}: {}\n'.format(k, v)
        self.set_result(output, separator=' ')

    return self.result()
def update(self, process_list):
    """Update the AMP by parsing the output of systemctl.

    process_list is unused here (kept for AMP interface compatibility).
    """
    # Get the systemctl status
    logger.debug('{}: Update stats using systemctl {}'.format(self.NAME, self.get('systemctl_cmd')))
    try:
        # Decode explicitly: on Python 3 check_output returns bytes, and
        # bytes.split('\n') raises TypeError (str pattern on bytes)
        res = check_output(self.get('systemctl_cmd').split()).decode('utf-8')
    except OSError as e:
        logger.debug('{}: Error while executing systemctl ({})'.format(self.NAME, e))
    else:
        status = {}
        # For each line (skip the header line)
        for line in res.split('\n')[1:-8]:
            # Split per space .*
            # 'column' instead of the ambiguous single-letter 'l' (E741)
            column = line.split()
            if len(column) > 3:
                # Count the values of the "load" and "active" columns
                for c in range(1, 3):
                    try:
                        status[column[c]] += 1
                    except KeyError:
                        status[column[c]] = 1
        # Build the output (string) message
        output = 'Services\n'
        for k, v in iteritems(status):
            output += '{}: {}\n'.format(k, v)
        self.set_result(output, separator=' ')

    return self.result()
def update(self):
    """Update the command result attributed."""
    # Search application monitored processes by a regular expression
    processlist = glances_processes.getalllist()

    # Iter upon the AMPs dict
    for amp_key, amp in iteritems(self.get()):
        if not amp.enable():
            # Do not update if the enable tag is set
            continue
        try:
            # A process matches when any of its cmdline tokens matches the regex
            matching = [proc for proc in processlist
                        for token in proc['cmdline']
                        if re.search(amp.regex(), token) is not None]
        except TypeError:
            # regex() may be None: skip this AMP
            continue
        if matching:
            # At least one process is matching the regex
            logger.debug("AMPS: {} process detected (PID={})".format(amp_key, matching[0]['pid']))
            # Call the AMP update method
            threading.Thread(target=amp.update_wrapper, args=[matching]).start()
        else:
            # Set the process number to 0
            amp.set_count(0)
            if amp.count_min() is not None and amp.count_min() > 0:
                # Only display the "No running process message" is countmin is defined
                amp.set_result("No running process")

    return self.__amps_dict
def update(self): """Update the host/system info using the input method. Return the stats (dict) """ # Reset stats self.reset() if self.input_method == 'local': # Update stats using the standard system lib self.stats['os_name'] = platform.system() self.stats['hostname'] = platform.node() self.stats['platform'] = platform.architecture()[0] if self.stats['os_name'] == "Linux": linux_distro = platform.linux_distribution() if linux_distro[0] == '': self.stats['linux_distro'] = _linux_os_release() else: self.stats['linux_distro'] = ' '.join(linux_distro[:2]) self.stats['os_version'] = platform.release() elif self.stats['os_name'].endswith('BSD'): self.stats['os_version'] = platform.release() elif self.stats['os_name'] == "Darwin": self.stats['os_version'] = platform.mac_ver()[0] elif self.stats['os_name'] == "Windows": os_version = platform.win32_ver() self.stats['os_version'] = ' '.join(os_version[::2]) # if the python version is 32 bit perhaps the windows operating # system is 64bit if self.stats['platform'] == '32bit' and 'PROCESSOR_ARCHITEW6432' in os.environ: self.stats['platform'] = '64bit' else: self.stats['os_version'] = "" # Add human readable name if self.stats['os_name'] == "Linux": self.stats['hr_name'] = self.stats['linux_distro'] else: self.stats['hr_name'] = '{} {}'.format( self.stats['os_name'], self.stats['os_version']) self.stats['hr_name'] += ' {}'.format(self.stats['platform']) elif self.input_method == 'snmp': # Update stats using SNMP try: self.stats = self.get_stats_snmp( snmp_oid=snmp_oid[self.short_system_name]) except KeyError: self.stats = self.get_stats_snmp(snmp_oid=snmp_oid['default']) # Default behavor: display all the information self.stats['os_name'] = self.stats['system_name'] # Windows OS tips if self.short_system_name == 'windows': for r, v in iteritems(snmp_to_human['windows']): if re.search(r, self.stats['system_name']): self.stats['os_name'] = v break # Add human readable name self.stats['hr_name'] = self.stats['os_name'] return 
self.stats
def __build_export(self, stats):
    """Build the export lists.

    Return a tuple (export_names, export_values); the two lists are kept
    in lockstep (names[i] describes values[i]).
    """
    export_names = []
    export_values = []

    if isinstance(stats, dict):
        # Stats is a dict
        # Is there a key ? (direct membership test, no need for iterkeys)
        if 'key' in stats:
            pre_key = '{}.'.format(stats[stats['key']])
        else:
            pre_key = ''
        # Walk through the dict
        for key, value in iteritems(stats):
            if isinstance(value, list):
                # Only the first item of a list is exported (may be empty)
                try:
                    value = value[0]
                except IndexError:
                    value = ''
            if isinstance(value, dict):
                # Recurse into nested dicts, prefixing the child names
                item_names, item_values = self.__build_export(value)
                item_names = [pre_key + key.lower() + str(i) for i in item_names]
                export_names += item_names
                export_values += item_values
            else:
                export_names.append(pre_key + key.lower())
                export_values.append(value)
    elif isinstance(stats, list):
        # Stats is a list (of dict)
        # Recursive loop through the list
        for item in stats:
            item_names, item_values = self.__build_export(item)
            export_names += item_names
            export_values += item_values

    return export_names, export_values
def update(self):
    """Update the command result attributed."""
    # Get the current processes list (once)
    processlist = glances_processes.getlist()

    # Iter upon the AMPs dict
    for amp_key, amp in iteritems(self.get()):
        if not amp.enable():
            # Do not update if the enable tag is set
            continue

        amps_list = self._build_amps_list(amp, processlist)

        if not amps_list:
            # Set the process number to 0
            amp.set_count(0)
            if amp.count_min() is not None and amp.count_min() > 0:
                # Only display the "No running process message" if countmin is defined
                amp.set_result("No running process")
        else:
            # At least one process is matching the regex
            logger.debug("AMPS: {} processes {} detected ({})".format(len(amps_list), amp_key, amps_list))
            # Call the AMP update method
            threading.Thread(target=amp.update_wrapper, args=[amps_list]).start()

    return self.__amps_dict
def __build_export(self, stats):
    """Build the export lists.

    Return a tuple (export_names, export_values); the two lists are kept
    in lockstep (names[i] describes values[i]).
    """
    export_names = []
    export_values = []

    if isinstance(stats, dict):
        # Stats is a dict
        # Is there a key ? (direct membership test, no need for iterkeys)
        if 'key' in stats:
            pre_key = '{0}.'.format(stats[stats['key']])
        else:
            pre_key = ''
        # Walk through the dict
        for key, value in iteritems(stats):
            if isinstance(value, list):
                # Only the first item of a list is exported (may be empty)
                try:
                    value = value[0]
                except IndexError:
                    value = ''
            if isinstance(value, dict):
                # Recurse into nested dicts, prefixing the child names
                item_names, item_values = self.__build_export(value)
                item_names = [pre_key + key.lower() + str(i) for i in item_names]
                export_names += item_names
                export_values += item_values
            else:
                export_names.append(pre_key + key.lower())
                export_values.append(value)
    elif isinstance(stats, list):
        # Stats is a list (of dict)
        # Recursive loop through the list
        for item in stats:
            item_names, item_values = self.__build_export(item)
            export_names += item_names
            export_values += item_values

    return export_names, export_values
def update(self):
    """Update the command result attributed."""
    # Search application monitored processes by a regular expression
    processlist = glances_processes.getalllist()

    # Iter upon the AMPs dict
    for name, amp in iteritems(self.get()):
        if not amp.enable():
            # Do not update if the enable tag is set
            continue

        try:
            # Keep every process whose cmdline contains a token matching the regex
            found = [p for p in processlist
                     for c in p['cmdline']
                     if re.search(amp.regex(), c) is not None]
        except TypeError:
            # regex() may be None: nothing to search for
            continue

        if len(found) == 0:
            # Set the process number to 0
            amp.set_count(0)
            if amp.count_min() is not None and amp.count_min() > 0:
                # Only display the "No running process message" is countmin is defined
                amp.set_result("No running process")
        else:
            # At least one process is matching the regex
            logger.debug("AMPS: {} process detected (PID={})".format(
                name, found[0]['pid']))
            # Call the AMP update method
            worker = threading.Thread(target=amp.update_wrapper, args=[found])
            worker.start()

    return self.__amps_dict
def update(self, process_list):
    """Update the AMP by parsing the output of the 'service' command.

    process_list is unused here (kept for AMP interface compatibility).
    """
    # Get the systemctl status
    logger.debug('{}: Update stats using service {}'.format(self.NAME, self.get('service_cmd')))
    try:
        res = check_output(self.get('service_cmd').split(), stderr=STDOUT).decode('utf-8')
    except OSError as e:
        logger.debug('{}: Error while executing service ({})'.format(self.NAME, e))
    else:
        status = {'running': 0, 'stopped': 0, 'upstart': 0}
        # For each line
        for line in res.split('\n'):
            # Split per space .*
            # 'fields' instead of the ambiguous single-letter 'l' (E741)
            fields = line.split()
            if len(fields) < 4:
                continue
            # fields[1] is the status flag: '+' running, '-' stopped, '?' upstart
            if fields[1] == '+':
                status['running'] += 1
            elif fields[1] == '-':
                status['stopped'] += 1
            elif fields[1] == '?':
                status['upstart'] += 1
        # Build the output (string) message
        output = 'Services\n'
        for k, v in iteritems(status):
            output += '{}: {}\n'.format(k, v)
        self.set_result(output, separator=' ')

    return self.result()
def build_tree(process_dict, sort_key, sort_reverse, hide_kernel_threads, excluded_processes):
    """Build a process tree using parent/child relationships.

    Return the tree root node.

    process_dict: maps psutil process objects to their stats dict.
    excluded_processes: processes whose children are re-rooted at the top level.
    NOTE(review): hide_kernel_threads is not used in this body — presumably
    filtering happened upstream; confirm before removing the parameter.
    """
    tree_root = ProcessTreeNode(root=True)
    nodes_to_add_last = collections.deque()

    # first pass: add nodes whose parent are in the tree
    for process, stats in iteritems(process_dict):
        new_node = ProcessTreeNode(process, stats, sort_key, sort_reverse)
        try:
            parent_process = process.parent()
        except psutil.NoSuchProcess:
            # parent is dead, consider no parent
            parent_process = None
        if (parent_process is None) or (parent_process in excluded_processes):
            # no parent, or excluded parent, add this node at the top level
            tree_root.children.append(new_node)
        else:
            parent_node = tree_root.find_process(parent_process)
            if parent_node is not None:
                # parent is already in the tree, add a new child
                parent_node.children.append(new_node)
            else:
                # parent is not in tree, add this node later
                nodes_to_add_last.append(new_node)

    # next pass(es): add nodes to their parents if it could not be done in
    # previous pass
    # NOTE(review): termination relies on every deferred node's ancestor chain
    # eventually reaching a placed node — confirm a cycle cannot occur here
    while nodes_to_add_last:
        # pop from left and append to right to avoid infinite loop
        node_to_add = nodes_to_add_last.popleft()
        try:
            parent_process = node_to_add.process.parent()
        except psutil.NoSuchProcess:
            # parent is dead, consider no parent, add this node at the top
            # level
            tree_root.children.append(node_to_add)
        else:
            if (parent_process is None) or (parent_process in excluded_processes):
                # no parent, or excluded parent, add this node at the top level
                tree_root.children.append(node_to_add)
            else:
                parent_node = tree_root.find_process(parent_process)
                if parent_node is not None:
                    # parent is already in the tree, add a new child
                    parent_node.children.append(node_to_add)
                else:
                    # parent is not in tree, add this node later
                    nodes_to_add_last.append(node_to_add)

    return tree_root
def get_system_name(self, oid_system_name):
    """Get the short os name from the OS name OID string.

    Return None when the OID string is empty or matches no known system.
    """
    if oid_system_name == '':
        return None

    # Find the short name in the oid_to_short_os_name dict
    for pattern, short_name in iteritems(oid_to_short_system_name):
        if re.search(pattern, oid_system_name):
            return short_name

    return None
def update(self):
    """Update load stats."""
    # Reset stats
    self.reset()

    if self.input_method == 'local':
        # Update stats using the standard system lib
        # Get the load using the os standard lib
        try:
            min1, min5, min15 = os.getloadavg()
        except (OSError, AttributeError):
            # getloadavg is not available on every platform
            self.stats = {}
        else:
            self.stats = {'min1': min1,
                          'min5': min5,
                          'min15': min15,
                          'cpucore': self.nb_log_core}
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        self.stats = self.get_stats_snmp(snmp_oid=snmp_oid)

        if self.stats['min1'] == '':
            self.reset()
            return self.stats

        # Python 3 return a dict like:
        # {'min1': "b'0.08'", 'min5': "b'0.12'", 'min15': "b'0.15'"}
        for key, value in iteritems(self.stats):
            self.stats[key] = float(value)

        self.stats['cpucore'] = self.nb_log_core

    # Update the history list
    self.update_stats_history()
    # Update the view
    self.update_views()

    return self.stats
def run(self):
    """Function called to grab stats.
    Infinite loop, should be stopped by calling the stop() method"""
    for key, endpoint in iteritems(self.AWS_EC2_API_METADATA):
        r_url = '{}/{}'.format(self.AWS_EC2_API_URL, endpoint)
        try:
            # Local request, a timeout of 3 seconds is OK
            r = requests.get(r_url, timeout=3)
        except requests.exceptions.ConnectTimeout:
            logger.debug(
                'cloud plugin - Connection to {} timed out'.format(r_url))
            break
        except Exception as e:
            logger.debug(
                'cloud plugin - Cannot connect to the AWS EC2 API {}: {}'.
                format(r_url, e))
            break
        # Only keep successful responses
        if r.ok:
            self._stats[key] = r.content
def update(self):
    """Update the AMP list."""
    # Reset stats
    self.reset()

    if self.input_method == 'local':
        # One entry per AMP with its state and configuration
        for amp_key, amp in iteritems(self.glances_amps.update()):
            self.stats.append({'key': amp_key,
                               'name': amp.NAME,
                               'result': amp.result(),
                               'refresh': amp.refresh(),
                               'timer': amp.time_until_refresh(),
                               'count': amp.count(),
                               'countmin': amp.count_min(),
                               'countmax': amp.count_max()})
    else:
        # Not available in SNMP mode
        pass

    return self.stats
def update(self):
    """Update the AMP list."""
    # Reset stats
    self.reset()

    if self.input_method == 'local':
        for key, amp in iteritems(self.glances_amps.update()):
            # Build the per-AMP stats entry
            entry = {
                'key': key,
                'name': amp.NAME,
                'result': amp.result(),
                'refresh': amp.refresh(),
                'timer': amp.time_until_refresh(),
                'count': amp.count(),
                'countmin': amp.count_min(),
                'countmax': amp.count_max(),
            }
            self.stats.append(entry)
    else:
        # Not available in SNMP mode
        pass

    return self.stats
def run(self):
    """Function called to grab stats.
    Infinite loop, should be stopped by calling the stop() method"""
    if not cloud_tag:
        logger.debug("cloud plugin - Requests lib is not installed")
        self.stop()
        return False

    for key, endpoint in iteritems(self.AWS_EC2_API_METADATA):
        r_url = '{}/{}'.format(self.AWS_EC2_API_URL, endpoint)
        try:
            # Local request, a timeout of 3 seconds is OK
            r = requests.get(r_url, timeout=3)
        except Exception as e:
            logger.debug('cloud plugin - Cannot connect to the AWS EC2 API {}: {}'.format(r_url, e))
            break
        # Only keep successful responses
        if r.ok:
            self._stats[key] = r.content

    return True
def run(self):
    """Grab plugin's stats.

    Infinite loop, should be stopped by calling the stop() method
    """
    if import_error_tag:
        self.stop()
        return False

    for key, endpoint in iteritems(self.OPENSTACK_API_METADATA):
        r_url = '{}/{}'.format(self.OPENSTACK_API_URL, endpoint)
        try:
            # Local request, a timeout of 3 seconds is OK
            r = requests.get(r_url, timeout=3)
        except Exception as e:
            logger.debug('cloud plugin - Cannot connect to the OpenStack metadata API {}: {}'.format(r_url, e))
            break
        # Only keep successful responses (decoded to ascii)
        if r.ok:
            self._stats[key] = to_ascii(r.content)

    return True
def update(self):
    """Update load stats."""
    # Init new stats
    stats = self.get_init_value()

    if self.input_method == 'local':
        # Update stats using the standard system lib
        # Get the load using the os standard lib
        load = self._getloadavg()
        if load is None:
            stats = self.get_init_value()
        else:
            stats = {'min1': load[0],
                     'min5': load[1],
                     'min15': load[2],
                     'cpucore': self.nb_log_core}
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        stats = self.get_stats_snmp(snmp_oid=snmp_oid)

        if stats['min1'] == '':
            stats = self.get_init_value()
            return stats

        # Python 3 return a dict like:
        # {'min1': "b'0.08'", 'min5': "b'0.12'", 'min15': "b'0.15'"}
        for key, value in iteritems(stats):
            stats[key] = float(value)

        stats['cpucore'] = self.nb_log_core

    # Update the stats
    self.stats = stats

    return self.stats
def export(self, name, columns, points):
    """Export the stats to the kdb+ host/port.

    name: Glances plugin name (used to build the kdb+ table name).
    columns / points: parallel lists of stat names and values.
    """
    # Remove non number stats and convert all numbers to float like prometheus
    data = {k: v for (k, v) in iteritems(dict(zip(columns, points))) if isinstance(v, Number)}

    # Append all tables name with self.prefix
    try:
        self.client.sendAsync(
            "{if[count z; x insert flip (`time, y)! .z.t, (),/: z]}",
            # np.bytes_: np.string_ was removed in NumPy 2.0 (it was an alias)
            np.bytes_(f"{self.prefix}{name.capitalize()}"),
            qlist(listkeys(data), qtype=QSYMBOL_LIST),
            qlist(listvalues(data), qtype=QDOUBLE_LIST),
        )
    except Exception as e:
        logger.error(f"Cannot export stats <{name}> to kdb+ ({e})")
    else:
        # Only claim success when the async send was actually issued
        logger.debug(f"Exported <{name}> stats to kdb+")
def run(self):
    """Function called to grab stats.
    Infinite loop, should be stopped by calling the stop() method"""
    if not cloud_tag:
        logger.debug("cloud plugin - Requests lib is not installed")
        self.stop()
        return False

    for meta_key, meta_path in iteritems(self.AWS_EC2_API_METADATA):
        r_url = '{}/{}'.format(self.AWS_EC2_API_URL, meta_path)
        try:
            # Local request, a timeout of 3 seconds is OK
            r = requests.get(r_url, timeout=3)
        except Exception as e:
            logger.debug(
                'cloud plugin - Cannot connect to the AWS EC2 API {}: {}'.
                format(r_url, e))
            break
        # Only keep successful responses
        if r.ok:
            self._stats[meta_key] = r.content

    return True
def run(self):
    """Grab plugin's stats.

    Infinite loop, should be stopped by calling the stop() method
    """
    if import_error_tag:
        self.stop()
        return False

    for meta_key, meta_path in iteritems(self.OPENSTACK_API_METADATA):
        r_url = '{}/{}'.format(self.OPENSTACK_API_URL, meta_path)
        try:
            # Local request, a timeout of 3 seconds is OK
            r = requests.get(r_url, timeout=3)
        except Exception as e:
            logger.debug(
                'cloud plugin - Cannot connect to the OpenStack metadata API {}: {}'
                .format(r_url, e))
            break
        # Only keep successful responses (decoded to ascii)
        if r.ok:
            self._stats[meta_key] = to_ascii(r.content)

    return True
def update(self):
    """Update the processes stats.

    Grabs per-process stats via psutil, filters/excludes processes, keeps
    global counters in self.processcount, then fills self.processlist
    (either as a tree or as a sorted, truncated flat list).
    """
    # Reset the stats
    self.processlist = []
    self.processcount = {
        'total': 0,
        'running': 0,
        'sleeping': 0,
        'thread': 0
    }

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Get the time since last update
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Build an internal dict with only mandatories stats (sort keys)
    processdict = {}
    excluded_processes = set()
    for proc in psutil.process_iter():
        # Ignore kernel threads if needed
        if self.no_kernel_threads and not WINDOWS and is_kernel_thread(
                proc):
            continue

        # If self.max_processes is None: Only retreive mandatory stats
        # Else: retreive mandatory and standard stats
        s = self.__get_process_stats(
            proc,
            mandatory_stats=True,
            standard_stats=self.max_processes is None)
        # Continue to the next process if it has to be filtered
        if s is None or (self.is_filtered(s['cmdline'])
                         and self.is_filtered(s['name'])):
            excluded_processes.add(proc)
            continue

        # Ok add the process to the list
        processdict[proc] = s
        # ignore the 'idle' process on Windows and *BSD
        # ignore the 'kernel_task' process on OS X
        # waiting for upstream patch from psutil
        if (BSD and processdict[proc]['name'] == 'idle' or WINDOWS
                and processdict[proc]['name'] == 'System Idle Process'
                or OSX and processdict[proc]['name'] == 'kernel_task'):
            continue

        # Update processcount (global statistics)
        try:
            self.processcount[str(proc.status())] += 1
        except KeyError:
            # Key did not exist, create it
            try:
                self.processcount[str(proc.status())] = 1
            except psutil.NoSuchProcess:
                pass
        except psutil.NoSuchProcess:
            pass
        else:
            self.processcount['total'] += 1
        # Update thread number (global statistics)
        try:
            self.processcount['thread'] += proc.num_threads()
        except Exception:
            pass

    if self._enable_tree:
        self.process_tree = ProcessTreeNode.build_tree(
            processdict, self.sort_key, self.sort_reverse,
            self.no_kernel_threads, excluded_processes)

        for i, node in enumerate(self.process_tree):
            # Only retreive stats for visible processes (max_processes)
            if self.max_processes is not None and i >= self.max_processes:
                break

            # add standard stats
            new_stats = self.__get_process_stats(node.process,
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=False)
            if new_stats is not None:
                node.stats.update(new_stats)

            # Add a specific time_since_update stats for bitrate
            node.stats['time_since_update'] = time_since_update
    else:
        # Process optimization
        # Only retreive stats for visible processes (max_processes)
        if self.max_processes is not None:
            # Sort the internal dict and cut the top N (Return a list of tuple)
            # tuple=key (proc), dict (returned by __get_process_stats)
            try:
                processiter = sorted(iteritems(processdict),
                                     key=lambda x: x[1][self.sort_key],
                                     reverse=self.sort_reverse)
            except (KeyError, TypeError) as e:
                logger.error("Cannot sort process list by {0}: {1}".format(
                    self.sort_key, e))
                logger.error('{0}'.format(listitems(processdict)[0]))
                # Fallback to all process (issue #423)
                processloop = iteritems(processdict)
                first = False
            else:
                processloop = processiter[0:self.max_processes]
                first = True
        else:
            # Get all processes stats
            processloop = iteritems(processdict)
            first = False

        for i in processloop:
            # Already existing mandatory stats
            procstat = i[1]
            if self.max_processes is not None:
                # Update with standard stats
                # and extended stats but only for TOP (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
            # Add a specific time_since_update stats for bitrate
            procstat['time_since_update'] = time_since_update
            # Update process list
            self.processlist.append(procstat)
            # Next...
            first = False

    # Build the all processes list used by the monitored list
    self.allprocesslist = itervalues(processdict)

    # Clean internals caches if timeout is reached
    if self.cache_timer.finished():
        self.username_cache = {}
        self.cmdline_cache = {}
        # Restart the timer
        self.cache_timer.reset()
def update(self):
    """Update the processes stats.

    Grabs per-process stats via psutil, filters/excludes processes, keeps
    global counters in self.processcount, then fills self.processlist
    (either as a tree or as a sorted, truncated flat list).
    """
    # Reset the stats
    self.processlist = []
    self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

    # Do not process if disable tag is set
    if self.disable_tag:
        return

    # Get the time since last update
    time_since_update = getTimeSinceLastUpdate('process_disk')

    # Build an internal dict with only mandatories stats (sort keys)
    processdict = {}
    excluded_processes = set()
    for proc in psutil.process_iter():
        # Ignore kernel threads if needed
        if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
            continue

        # If self.max_processes is None: Only retreive mandatory stats
        # Else: retreive mandatory and standard stats
        s = self.__get_process_stats(proc,
                                     mandatory_stats=True,
                                     standard_stats=self.max_processes is None)
        # Continue to the next process if it has to be filtered
        if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
            excluded_processes.add(proc)
            continue

        # Ok add the process to the list
        processdict[proc] = s
        # ignore the 'idle' process on Windows and *BSD
        # ignore the 'kernel_task' process on OS X
        # waiting for upstream patch from psutil
        if (BSD and processdict[proc]['name'] == 'idle' or
                WINDOWS and processdict[proc]['name'] == 'System Idle Process' or
                OSX and processdict[proc]['name'] == 'kernel_task'):
            continue

        # Update processcount (global statistics)
        try:
            self.processcount[str(proc.status())] += 1
        except KeyError:
            # Key did not exist, create it
            try:
                self.processcount[str(proc.status())] = 1
            except psutil.NoSuchProcess:
                pass
        except psutil.NoSuchProcess:
            pass
        else:
            self.processcount['total'] += 1
        # Update thread number (global statistics)
        try:
            self.processcount['thread'] += proc.num_threads()
        except Exception:
            pass

    if self._enable_tree:
        self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                       self.sort_key,
                                                       self.sort_reverse,
                                                       self.no_kernel_threads,
                                                       excluded_processes)

        for i, node in enumerate(self.process_tree):
            # Only retreive stats for visible processes (max_processes)
            if self.max_processes is not None and i >= self.max_processes:
                break

            # add standard stats
            new_stats = self.__get_process_stats(node.process,
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=False)
            if new_stats is not None:
                node.stats.update(new_stats)

            # Add a specific time_since_update stats for bitrate
            node.stats['time_since_update'] = time_since_update
    else:
        # Process optimization
        # Only retreive stats for visible processes (max_processes)
        if self.max_processes is not None:
            # Sort the internal dict and cut the top N (Return a list of tuple)
            # tuple=key (proc), dict (returned by __get_process_stats)
            try:
                processiter = sorted(iteritems(processdict),
                                     key=lambda x: x[1][self.sort_key],
                                     reverse=self.sort_reverse)
            except (KeyError, TypeError) as e:
                logger.error("Cannot sort process list by {0}: {1}".format(self.sort_key, e))
                logger.error('{0}'.format(listitems(processdict)[0]))
                # Fallback to all process (issue #423)
                processloop = iteritems(processdict)
                first = False
            else:
                processloop = processiter[0:self.max_processes]
                first = True
        else:
            # Get all processes stats
            processloop = iteritems(processdict)
            first = False

        for i in processloop:
            # Already existing mandatory stats
            procstat = i[1]
            if self.max_processes is not None:
                # Update with standard stats
                # and extended stats but only for TOP (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
            # Add a specific time_since_update stats for bitrate
            procstat['time_since_update'] = time_since_update
            # Update process list
            self.processlist.append(procstat)
            # Next...
            first = False

    # Build the all processes list used by the monitored list
    self.allprocesslist = itervalues(processdict)

    # Clean internals caches if timeout is reached
    if self.cache_timer.finished():
        self.username_cache = {}
        self.cmdline_cache = {}
        # Restart the timer
        self.cache_timer.reset()
def get_process_curses_data(self, p, first, args):
    """Get curses data to display for a process.

    :param p: stats dict for one process; keys such as 'cpu_percent',
        'memory_percent', 'memory_info', 'io_counters'... may be missing,
        in which case a '?' placeholder is displayed
    :param first: True for the top process only; extended stats are
        rendered only for it
    :param args: command line arguments (reads disable_irix and
        process_short_name)
    :return: list of curses items (dicts), starting with a new-line marker
    """
    ret = [self.curse_new_line()]
    # CPU
    if 'cpu_percent' in p and p['cpu_percent'] is not None and p['cpu_percent'] != '':
        if args.disable_irix and self.nb_log_core != 0:
            # Irix mode off: normalize CPU% by the number of logical cores
            msg = '{0:>6.1f}'.format(p['cpu_percent'] / float(self.nb_log_core))
        else:
            msg = '{0:>6.1f}'.format(p['cpu_percent'])
        ret.append(self.curse_add_line(msg,
                                       self.get_alert(p['cpu_percent'], header="cpu")))
    else:
        msg = '{0:>6}'.format('?')
        ret.append(self.curse_add_line(msg))
    # MEM
    if 'memory_percent' in p and p['memory_percent'] is not None and p['memory_percent'] != '':
        msg = '{0:>6.1f}'.format(p['memory_percent'])
        ret.append(self.curse_add_line(msg,
                                       self.get_alert(p['memory_percent'], header="mem")))
    else:
        msg = '{0:>6}'.format('?')
        ret.append(self.curse_add_line(msg))
    # VMS/RSS
    # assumes memory_info[0] is RSS and memory_info[1] is VMS — TODO confirm
    if 'memory_info' in p and p['memory_info'] is not None and p['memory_info'] != '':
        # VMS
        msg = '{0:>6}'.format(self.auto_unit(p['memory_info'][1], low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
        # RSS
        msg = '{0:>6}'.format(self.auto_unit(p['memory_info'][0], low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
    else:
        # Two placeholder cells: one for VMS, one for RSS
        msg = '{0:>6}'.format('?')
        ret.append(self.curse_add_line(msg))
        ret.append(self.curse_add_line(msg))
    # PID
    msg = '{0:>6}'.format(p['pid'])
    ret.append(self.curse_add_line(msg))
    # USER
    if 'username' in p:
        # docker internal users are displayed as ints only, therefore str()
        msg = ' {0:9}'.format(str(p['username'])[:9])
        ret.append(self.curse_add_line(msg))
    else:
        msg = ' {0:9}'.format('?')
        ret.append(self.curse_add_line(msg))
    # NICE
    if 'nice' in p:
        nice = p['nice']
        if nice is None:
            nice = '?'
        msg = '{0:>5}'.format(nice)
        # Highlight non-default nice values (Windows default is 32,
        # elsewhere it is 0)
        if isinstance(nice, int) and ((WINDOWS and nice != 32) or
                                      (not WINDOWS and nice != 0)):
            ret.append(self.curse_add_line(msg, decoration='NICE'))
        else:
            ret.append(self.curse_add_line(msg))
    else:
        msg = '{0:>5}'.format('?')
        ret.append(self.curse_add_line(msg))
    # STATUS
    if 'status' in p:
        status = p['status']
        msg = '{0:>2}'.format(status)
        if status == 'R':
            # Running processes are highlighted
            ret.append(self.curse_add_line(msg, decoration='STATUS'))
        else:
            ret.append(self.curse_add_line(msg))
    else:
        msg = '{0:>2}'.format('?')
        ret.append(self.curse_add_line(msg))
    # TIME+
    if self.tag_proc_time:
        try:
            delta = timedelta(seconds=sum(p['cpu_times']))
        except (OverflowError, TypeError) as e:
            # Catch OverflowError on some Amazon EC2 server
            # See https://github.com/nicolargo/glances/issues/87
            # Also catch TypeError on Mac OS X
            # See: https://github.com/nicolargo/glances/issues/622
            logger.debug("Cannot get TIME+ ({0})".format(e))
            self.tag_proc_time = False
        else:
            hours, minutes, seconds, microseconds = convert_timedelta(delta)
            if hours:
                msg = '{0:>4}h'.format(hours)
                ret.append(self.curse_add_line(msg, decoration='CPU_TIME', optional=True))
                msg = '{0}:{1}'.format(str(minutes).zfill(2), seconds)
            else:
                msg = '{0:>4}:{1}.{2}'.format(minutes, seconds, microseconds)
    else:
        msg = '{0:>10}'.format('?')
    # NOTE(review): if the try above raised, msg still holds the STATUS
    # text and is appended a second time here — confirm this is intended
    ret.append(self.curse_add_line(msg, optional=True))
    # IO read/write
    # assumes io_counters = [read, write, old_read, old_write] — TODO confirm
    if 'io_counters' in p:
        # IO read (bytes per second since the last refresh)
        io_rs = int((p['io_counters'][0] - p['io_counters'][2]) / p['time_since_update'])
        if io_rs == 0:
            msg = '{0:>6}'.format("0")
        else:
            msg = '{0:>6}'.format(self.auto_unit(io_rs, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        # IO write (bytes per second since the last refresh)
        io_ws = int((p['io_counters'][1] - p['io_counters'][3]) / p['time_since_update'])
        if io_ws == 0:
            msg = '{0:>6}'.format("0")
        else:
            msg = '{0:>6}'.format(self.auto_unit(io_ws, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    else:
        # Two placeholder cells: one for read, one for write
        msg = '{0:>6}'.format("?")
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    # Command line
    # If no command line for the process is available, fallback to
    # the bare process name instead
    cmdline = p['cmdline']
    try:
        # XXX: remove `cmdline != ['']` when we'll drop support for psutil<4.0.0
        if cmdline and cmdline != ['']:
            path, cmd = os.path.split(cmdline[0])
            if os.path.isdir(path) and not args.process_short_name:
                # Display the full path, with the binary name highlighted
                msg = ' {0}'.format(path) + os.sep
                ret.append(self.curse_add_line(msg, splittable=True))
                ret.append(self.curse_add_line(cmd, decoration='PROCESS', splittable=True))
            else:
                msg = ' {0}'.format(cmd)
                ret.append(self.curse_add_line(msg, decoration='PROCESS', splittable=True))
            # Flatten the argument list (newlines replaced to keep one line)
            arguments = ' '.join(cmdline[1:]).replace('\n', ' ')
            msg = ' {0}'.format(arguments)
            ret.append(self.curse_add_line(msg, splittable=True))
        else:
            msg = ' {0}'.format(p['name'])
            ret.append(self.curse_add_line(msg, splittable=True))
    except UnicodeEncodeError:
        # Non-encodable command line: display an empty cell instead
        ret.append(self.curse_add_line('', splittable=True))
    # Add extended stats but only for the top processes
    # !!! CPU consumption ???
    # TODO: extended stats into the web interface
    if first and 'extended_stats' in p:
        # Left padding
        xpad = ' ' * 13
        # First line is CPU affinity
        if 'cpu_affinity' in p and p['cpu_affinity'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'CPU affinity: ' + str(len(p['cpu_affinity'])) + ' cores'
            ret.append(self.curse_add_line(msg, splittable=True))
        # Second line is memory info
        if 'memory_info_ex' in p and p['memory_info_ex'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'Memory info: '
            for k, v in iteritems(p['memory_info_ex']._asdict()):
                # Ignore rss and vms (already displayed)
                if k not in ['rss', 'vms'] and v is not None:
                    msg += k + ' ' + self.auto_unit(v, low_precision=False) + ' '
            if 'memory_swap' in p and p['memory_swap'] is not None:
                msg += 'swap ' + self.auto_unit(p['memory_swap'], low_precision=False)
            ret.append(self.curse_add_line(msg, splittable=True))
        # Third line is for open files/network sessions
        msg = ''
        if 'num_threads' in p and p['num_threads'] is not None:
            msg += 'threads ' + str(p['num_threads']) + ' '
        if 'num_fds' in p and p['num_fds'] is not None:
            msg += 'files ' + str(p['num_fds']) + ' '
        if 'num_handles' in p and p['num_handles'] is not None:
            msg += 'handles ' + str(p['num_handles']) + ' '
        if 'tcp' in p and p['tcp'] is not None:
            msg += 'TCP ' + str(p['tcp']) + ' '
        if 'udp' in p and p['udp'] is not None:
            msg += 'UDP ' + str(p['udp']) + ' '
        if msg != '':
            ret.append(self.curse_new_line())
            msg = xpad + 'Open: ' + msg
            ret.append(self.curse_add_line(msg, splittable=True))
        # Fouth line is IO nice level (only Linux and Windows OS)
        if 'ionice' in p and p['ionice'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'IO nice: '
            k = 'Class is '
            v = p['ionice'].ioclass
            # Linux: The scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle.
            # Windows: On Windows only ioclass is used and it can be set to 2 (normal), 1 (low) or 0 (very low).
            if WINDOWS:
                if v == 0:
                    msg += k + 'Very Low'
                elif v == 1:
                    msg += k + 'Low'
                elif v == 2:
                    msg += 'No specific I/O priority'
                else:
                    msg += k + str(v)
            else:
                if v == 0:
                    msg += 'No specific I/O priority'
                elif v == 1:
                    msg += k + 'Real Time'
                elif v == 2:
                    msg += k + 'Best Effort'
                elif v == 3:
                    msg += k + 'IDLE'
                else:
                    msg += k + str(v)
            # value is a number which goes from 0 to 7.
            # The higher the value, the lower the I/O priority of the process.
            if hasattr(p['ionice'], 'value') and p['ionice'].value != 0:
                msg += ' (value %s/7)' % str(p['ionice'].value)
            ret.append(self.curse_add_line(msg, splittable=True))

    return ret
def update(self):
    """Update the host/system info using the input method.

    Return the stats (dict)
    """
    # Fresh (empty) stats structure
    stats = self.get_init_value()

    if self.input_method == 'local':
        # Stats come from the standard platform/os modules
        stats['os_name'] = platform.system()
        stats['hostname'] = platform.node()
        stats['platform'] = platform.architecture()[0]

        system = stats['os_name']
        if system == "Linux":
            try:
                distro = platform.linux_distribution()
            except AttributeError:
                # linux_distribution() is not available: read the
                # distribution name from the OS release file instead
                stats['linux_distro'] = _linux_os_release()
            else:
                if distro[0] == '':
                    stats['linux_distro'] = _linux_os_release()
                else:
                    stats['linux_distro'] = ' '.join(distro[:2])
            stats['os_version'] = platform.release()
        elif system == "Darwin":
            stats['os_version'] = platform.mac_ver()[0]
        elif system == "Windows":
            win_ver = platform.win32_ver()
            stats['os_version'] = ' '.join(win_ver[::2])
            # if the python version is 32 bit perhaps the windows operating
            # system is 64bit
            if stats['platform'] == '32bit' and 'PROCESSOR_ARCHITEW6432' in os.environ:
                stats['platform'] = '64bit'
        elif system.endswith('BSD') or system == 'SunOS':
            stats['os_version'] = platform.release()
        else:
            stats['os_version'] = ""

        # Add human readable name
        if system == "Linux":
            hr_name = stats['linux_distro']
        else:
            hr_name = '{} {}'.format(system, stats['os_version'])
        hr_name += ' {}'.format(stats['platform'])
        stats['hr_name'] = hr_name
    elif self.input_method == 'snmp':
        # Update stats using SNMP: use the OID table matching the short
        # system name when there is one, the default table otherwise
        try:
            stats = self.get_stats_snmp(
                snmp_oid=snmp_oid[self.short_system_name])
        except KeyError:
            stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
        # Default behavor: display all the information
        stats['os_name'] = stats['system_name']
        # Windows OS tips
        if self.short_system_name == 'windows':
            for pattern, label in iteritems(snmp_to_human['windows']):
                if re.search(pattern, stats['system_name']):
                    stats['os_name'] = label
                    break
        # Add human readable name
        stats['hr_name'] = stats['os_name']

    # Update the stats
    self.stats = stats

    return self.stats
def get_process_curses_data(self, p, first, args):
    """Get curses data to display for a process.

    :param p: stats dict for one process; keys such as 'cpu_percent',
        'memory_percent', 'memory_info', 'io_counters'... may be missing,
        in which case a '?' placeholder is displayed
    :param first: True for the top process only; extended stats are
        rendered only for it
    :param args: command line arguments (reads disable_irix and
        process_short_name)
    :return: list of curses items (dicts), starting with a new-line marker
    """
    ret = [self.curse_new_line()]
    # CPU
    if 'cpu_percent' in p and p[
            'cpu_percent'] is not None and p['cpu_percent'] != '':
        if args.disable_irix and self.nb_log_core != 0:
            # Irix mode off: normalize CPU% by the number of logical cores
            msg = '{:>6.1f}'.format(p['cpu_percent'] / float(self.nb_log_core))
        else:
            msg = '{:>6.1f}'.format(p['cpu_percent'])
        ret.append(self.curse_add_line(msg,
                                       self.get_alert(p['cpu_percent'], header="cpu")))
    else:
        msg = '{:>6}'.format('?')
        ret.append(self.curse_add_line(msg))
    # MEM
    if 'memory_percent' in p and p[
            'memory_percent'] is not None and p['memory_percent'] != '':
        msg = '{:>6.1f}'.format(p['memory_percent'])
        ret.append(self.curse_add_line(msg,
                                       self.get_alert(p['memory_percent'], header="mem")))
    else:
        msg = '{:>6}'.format('?')
        ret.append(self.curse_add_line(msg))
    # VMS/RSS
    # assumes memory_info[0] is RSS and memory_info[1] is VMS — TODO confirm
    if 'memory_info' in p and p[
            'memory_info'] is not None and p['memory_info'] != '':
        # VMS
        msg = '{:>6}'.format(
            self.auto_unit(p['memory_info'][1], low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
        # RSS
        msg = '{:>6}'.format(
            self.auto_unit(p['memory_info'][0], low_precision=False))
        ret.append(self.curse_add_line(msg, optional=True))
    else:
        # Two placeholder cells: one for VMS, one for RSS
        msg = '{:>6}'.format('?')
        ret.append(self.curse_add_line(msg))
        ret.append(self.curse_add_line(msg))
    # PID
    msg = '{:>6}'.format(p['pid'])
    ret.append(self.curse_add_line(msg))
    # USER
    if 'username' in p:
        # docker internal users are displayed as ints only, therefore str()
        # Correct issue #886 on Windows OS
        msg = ' {:9}'.format(str(p['username'])[:9])
        ret.append(self.curse_add_line(msg))
    else:
        msg = ' {:9}'.format('?')
        ret.append(self.curse_add_line(msg))
    # NICE
    if 'nice' in p:
        nice = p['nice']
        if nice is None:
            nice = '?'
        msg = '{:>5}'.format(nice)
        # Highlight non-default nice values (Windows default is 32,
        # elsewhere it is 0)
        if isinstance(nice, int) and ((WINDOWS and nice != 32) or
                                      (not WINDOWS and nice != 0)):
            ret.append(self.curse_add_line(msg, decoration='NICE'))
        else:
            ret.append(self.curse_add_line(msg))
    else:
        msg = '{:>5}'.format('?')
        ret.append(self.curse_add_line(msg))
    # STATUS
    if 'status' in p:
        status = p['status']
        msg = '{:>2}'.format(status)
        if status == 'R':
            # Running processes are highlighted
            ret.append(self.curse_add_line(msg, decoration='STATUS'))
        else:
            ret.append(self.curse_add_line(msg))
    else:
        msg = '{:>2}'.format('?')
        ret.append(self.curse_add_line(msg))
    # TIME+
    if self.tag_proc_time:
        try:
            delta = timedelta(seconds=sum(p['cpu_times']))
        except (OverflowError, TypeError) as e:
            # Catch OverflowError on some Amazon EC2 server
            # See https://github.com/nicolargo/glances/issues/87
            # Also catch TypeError on Mac OS X
            # See: https://github.com/nicolargo/glances/issues/622
            logger.debug("Cannot get TIME+ ({})".format(e))
            self.tag_proc_time = False
        else:
            hours, minutes, seconds, microseconds = convert_timedelta(
                delta)
            if hours:
                msg = '{:>4}h'.format(hours)
                ret.append(self.curse_add_line(msg,
                                               decoration='CPU_TIME',
                                               optional=True))
                msg = '{}:{}'.format(str(minutes).zfill(2), seconds)
            else:
                msg = '{:>4}:{}.{}'.format(minutes, seconds, microseconds)
    else:
        msg = '{:>10}'.format('?')
    # NOTE(review): if the try above raised, msg still holds the STATUS
    # text and is appended a second time here — confirm this is intended
    ret.append(self.curse_add_line(msg, optional=True))
    # IO read/write
    # assumes io_counters = [read, write, old_read, old_write] — TODO confirm
    if 'io_counters' in p:
        # IO read (bytes per second since the last refresh)
        io_rs = int((p['io_counters'][0] - p['io_counters'][2]) /
                    p['time_since_update'])
        if io_rs == 0:
            msg = '{:>6}'.format("0")
        else:
            msg = '{:>6}'.format(self.auto_unit(io_rs, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        # IO write (bytes per second since the last refresh)
        io_ws = int((p['io_counters'][1] - p['io_counters'][3]) /
                    p['time_since_update'])
        if io_ws == 0:
            msg = '{:>6}'.format("0")
        else:
            msg = '{:>6}'.format(self.auto_unit(io_ws, low_precision=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    else:
        # Two placeholder cells: one for read, one for write
        msg = '{:>6}'.format("?")
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
        ret.append(self.curse_add_line(msg, optional=True, additional=True))
    # Command line
    # If no command line for the process is available, fallback to
    # the bare process name instead
    cmdline = p['cmdline']
    try:
        # XXX: remove `cmdline != ['']` when we'll drop support for psutil<4.0.0
        if cmdline and cmdline != ['']:
            path, cmd, arguments = split_cmdline(cmdline)
            if os.path.isdir(path) and not args.process_short_name:
                # Display the full path, with the binary name highlighted
                msg = ' {}'.format(path) + os.sep
                ret.append(self.curse_add_line(msg, splittable=True))
                if glances_processes.is_tree_enabled():
                    # mark position to add tree decoration
                    ret[-1]["_tree_decoration"] = True
                ret.append(self.curse_add_line(cmd,
                                               decoration='PROCESS',
                                               splittable=True))
            else:
                msg = ' {}'.format(cmd)
                ret.append(self.curse_add_line(msg,
                                               decoration='PROCESS',
                                               splittable=True))
                if glances_processes.is_tree_enabled():
                    # mark position to add tree decoration
                    ret[-1]["_tree_decoration"] = True
            if arguments:
                msg = ' {}'.format(arguments)
                ret.append(self.curse_add_line(msg, splittable=True))
        else:
            msg = ' {}'.format(p['name'])
            ret.append(self.curse_add_line(msg, splittable=True))
    except UnicodeEncodeError:
        # Non-encodable command line: display an empty cell instead
        ret.append(self.curse_add_line('', splittable=True))
    # Add extended stats but only for the top processes
    # !!! CPU consumption ???
    # TODO: extended stats into the web interface
    if first and 'extended_stats' in p:
        # Left padding
        xpad = ' ' * 13
        # First line is CPU affinity
        if 'cpu_affinity' in p and p['cpu_affinity'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'CPU affinity: ' + str(len(
                p['cpu_affinity'])) + ' cores'
            ret.append(self.curse_add_line(msg, splittable=True))
        # Second line is memory info
        if 'memory_info' in p and p['memory_info'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'Memory info: '
            for k, v in iteritems(p['memory_info']._asdict()):
                # Ignore rss and vms (already displayed)
                if k not in ['rss', 'vms'] and v is not None:
                    msg += k + ' ' + self.auto_unit(
                        v, low_precision=False) + ' '
            if 'memory_swap' in p and p['memory_swap'] is not None:
                msg += 'swap ' + self.auto_unit(p['memory_swap'],
                                                low_precision=False)
            ret.append(self.curse_add_line(msg, splittable=True))
        # Third line is for open files/network sessions
        msg = ''
        if 'num_threads' in p and p['num_threads'] is not None:
            msg += 'threads ' + str(p['num_threads']) + ' '
        if 'num_fds' in p and p['num_fds'] is not None:
            msg += 'files ' + str(p['num_fds']) + ' '
        if 'num_handles' in p and p['num_handles'] is not None:
            msg += 'handles ' + str(p['num_handles']) + ' '
        if 'tcp' in p and p['tcp'] is not None:
            msg += 'TCP ' + str(p['tcp']) + ' '
        if 'udp' in p and p['udp'] is not None:
            msg += 'UDP ' + str(p['udp']) + ' '
        if msg != '':
            ret.append(self.curse_new_line())
            msg = xpad + 'Open: ' + msg
            ret.append(self.curse_add_line(msg, splittable=True))
        # Fouth line is IO nice level (only Linux and Windows OS)
        if 'ionice' in p and p['ionice'] is not None:
            ret.append(self.curse_new_line())
            msg = xpad + 'IO nice: '
            k = 'Class is '
            v = p['ionice'].ioclass
            # Linux: The scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle.
            # Windows: On Windows only ioclass is used and it can be set to 2 (normal), 1 (low) or 0 (very low).
            if WINDOWS:
                if v == 0:
                    msg += k + 'Very Low'
                elif v == 1:
                    msg += k + 'Low'
                elif v == 2:
                    msg += 'No specific I/O priority'
                else:
                    msg += k + str(v)
            else:
                if v == 0:
                    msg += 'No specific I/O priority'
                elif v == 1:
                    msg += k + 'Real Time'
                elif v == 2:
                    msg += k + 'Best Effort'
                elif v == 3:
                    msg += k + 'IDLE'
                else:
                    msg += k + str(v)
            # value is a number which goes from 0 to 7.
            # The higher the value, the lower the I/O priority of the process.
            if hasattr(p['ionice'], 'value') and p['ionice'].value != 0:
                msg += ' (value %s/7)' % str(p['ionice'].value)
            ret.append(self.curse_add_line(msg, splittable=True))

    return ret