Example #1
    def update(self):
        """Update disk I/O stats using the input method."""
        # Reset stats
        self.reset()

        if self.get_input() == 'local':
            # Update stats using the standard system lib
            # Grab the stat using the PsUtil disk_io_counters method
            # read_count: number of reads
            # write_count: number of writes
            # read_bytes: number of bytes read
            # write_bytes: number of bytes written
            # read_time: time spent reading from disk (in milliseconds)
            # write_time: time spent writing to disk (in milliseconds)
            try:
                diskiocounters = psutil.disk_io_counters(perdisk=True)
            except Exception:
                return self.stats

            # Previous disk IO stats are stored in the diskio_old variable
            if not hasattr(self, 'diskio_old'):
                # First call, we init the diskio_old var
                try:
                    self.diskio_old = diskiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # By storing time data we enable Rx/s and Tx/s calculations in the
                # XML/RPC API, which would otherwise be overly difficult work
                # for users of the API
                time_since_update = getTimeSinceLastUpdate('disk')

                diskio_new = diskiocounters
                for disk in diskio_new:
                    try:
                        # Try necessary to manage dynamic disk creation/deletion
                        diskstat = {}
                        diskstat['time_since_update'] = time_since_update
                        diskstat['disk_name'] = disk
                        diskstat['read_bytes'] = (
                            diskio_new[disk].read_bytes -
                            self.diskio_old[disk].read_bytes)
                        diskstat['write_bytes'] = (
                            diskio_new[disk].write_bytes -
                            self.diskio_old[disk].write_bytes)
                    except KeyError:
                        continue
                    else:
                        self.stats.append(diskstat)

                # Save stats to compute next bitrate
                self.diskio_old = diskio_new
        elif self.get_input() == 'snmp':
            # Update stats using SNMP
            # No standard way for the moment...
            pass

        return self.stats
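
The pattern above (snapshot, diff against the previous snapshot, divide by the elapsed time) can be tried standalone. A minimal sketch, assuming only psutil and a fixed one-second interval in place of the getTimeSinceLastUpdate helper:

    import time

    import psutil

    # Two per-disk snapshots, one second apart
    old = psutil.disk_io_counters(perdisk=True)
    time.sleep(1.0)
    new = psutil.disk_io_counters(perdisk=True)

    for disk in new:
        if disk not in old:
            continue  # disk appeared between the two snapshots
        read_bps = new[disk].read_bytes - old[disk].read_bytes
        write_bps = new[disk].write_bytes - old[disk].write_bytes
        print('{0}: read {1} B/s, write {2} B/s'.format(disk, read_bps, write_bps))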
Example #2
    def get_docker_network(self, container_id, all_stats):
        """Return the container network usage using the Docker API (v1.0 or higher).

        Input: container_id is the full container id.
        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}
        with:
            time_since_update: number of seconds elapsed since the last grab
            rx: Number of bytes received
            tx: Number of bytes transmitted
        """
        # Init the returned dict
        network_new = {}

        # Read the rx/tx stats (in bytes)
        try:
            netcounters = all_stats["network"]
        except KeyError as e:
            # all_stats does not contain NETWORK information
            logger.debug(
                "Can not grab NET usage for container {0} ({1})".format(
                    container_id, e))
            # No fallback available...
            return network_new

        # Previous network counters are stored in the netcounters_old variable
        if not hasattr(self, 'netcounters_old'):
            # First call, we init the netcounters_old var
            self.netcounters_old = {}
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.netcounters_old:
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            network_new['time_since_update'] = getTimeSinceLastUpdate(
                'docker_net_{0}'.format(container_id))
            network_new['rx'] = netcounters["rx_bytes"] - self.netcounters_old[
                container_id]["rx_bytes"]
            network_new['tx'] = netcounters["tx_bytes"] - self.netcounters_old[
                container_id]["tx_bytes"]
            network_new['cumulative_rx'] = netcounters["rx_bytes"]
            network_new['cumulative_tx'] = netcounters["tx_bytes"]

            # Save stats to compute next bitrate
            self.netcounters_old[container_id] = netcounters

        # Return the stats
        return network_new
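
For reference, the rx/tx delta computed in the else branch can be isolated into a small pure function. This is only a sketch: the counter dict shape ({'rx_bytes': ..., 'tx_bytes': ...}) is taken from the method above, and the helper name docker_net_rate is hypothetical:

    def docker_net_rate(old_counters, new_counters, elapsed):
        """Byte rates from two successive counter dicts (shape as above)."""
        return {
            'rx': (new_counters['rx_bytes'] - old_counters['rx_bytes']) / elapsed,
            'tx': (new_counters['tx_bytes'] - old_counters['tx_bytes']) / elapsed,
        }

    # Hand-made snapshots taken one second apart
    print(docker_net_rate({'rx_bytes': 1000, 'tx_bytes': 500},
                          {'rx_bytes': 4000, 'tx_bytes': 900},
                          1.0))  # {'rx': 3000.0, 'tx': 400.0}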
Example #3
    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # For each existing process...
        for proc in psutil.process_iter():
            try:
                # Get stats using the PSUtil
                procstat = self.__get_process_stats(proc)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # ignore the 'idle' process on Windows and *BSD
                # ignore the 'kernel_task' process on OS X
                # waiting for upstream patch from psutil
                if (is_bsd and procstat['name'] == 'idle' or
                        is_windows and procstat['name'] == 'System Idle Process' or
                        is_mac and procstat['name'] == 'kernel_task'):
                    continue
                # Update processcount (global statistics)
                try:
                    self.processcount[str(proc.status())] += 1
                except KeyError:
                    # Key did not exist, create it
                    self.processcount[str(proc.status())] = 1
                else:
                    self.processcount['total'] += 1
                # Update thread number (global statistics)
                try:
                    self.processcount['thread'] += proc.num_threads()
                except Exception:
                    pass
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue
            else:
                # Update processlist
                self.processlist.append(procstat)

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()
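
The per-status counting done above can be reproduced outside the class. A minimal sketch, assuming psutil 2.x or later, where status() and num_threads() are methods:

    import psutil

    processcount = {'total': 0, 'thread': 0}
    for proc in psutil.process_iter():
        try:
            status = str(proc.status())
            processcount[status] = processcount.get(status, 0) + 1
            processcount['total'] += 1
            processcount['thread'] += proc.num_threads()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue

    print(processcount)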
Example #4
    def get_docker_network(self, container_id, all_stats):
        """Return the container network usage using the Docker API (v1.0 or higher).

        Input: container_id is the full container id.
        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}
        with:
            time_since_update: number of seconds elapsed since the last grab
            rx: Number of bytes received
            tx: Number of bytes transmitted
        """
        # Init the returned dict
        network_new = {}

        # Read the rx/tx stats (in bytes)
        try:
            netcounters = all_stats["network"]
        except KeyError as e:
            # all_stats does not contain NETWORK information
            logger.debug("Can not grab NET usage for container {0} ({1})".format(container_id, e))
            # No fallback available...
            return network_new

        # Previous network counters are stored in the netcounters_old variable
        if not hasattr(self, 'netcounters_old'):
            # First call, we init the netcounters_old var
            self.netcounters_old = {}
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.netcounters_old:
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{0}'.format(container_id))
            network_new['rx'] = netcounters["rx_bytes"] - self.netcounters_old[container_id]["rx_bytes"]
            network_new['tx'] = netcounters["tx_bytes"] - self.netcounters_old[container_id]["tx_bytes"]
            network_new['cumulative_rx'] = netcounters["rx_bytes"]
            network_new['cumulative_tx'] = netcounters["tx_bytes"]

            # Save stats to compute next bitrate
            self.netcounters_old[container_id] = netcounters

        # Return the stats
        return network_new
Example #5
    def update(self):
        """Update network stats using the input method.

        Stats is a list of dict (one dict per interface)
        """
        # Reset stats
        self.reset()

        if self.get_input() == 'local':
            # Update stats using the standard system lib

            # Grab network interface stats using the psutil net_io_counters method
            try:
                netiocounters = psutil.net_io_counters(pernic=True)
            except UnicodeDecodeError:
                return self.stats

            # Previous network interface stats are stored in the network_old variable
            if not hasattr(self, 'network_old'):
                # First call, we init the network_old var
                try:
                    self.network_old = netiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # By storing time data we enable Rx/s and Tx/s calculations in the
                # XML/RPC API, which would otherwise be overly difficult work
                # for users of the API
                time_since_update = getTimeSinceLastUpdate('net')

                # Loop over interfaces
                network_new = netiocounters
                for net in network_new:
                    try:
                        # Try necessary to manage dynamic network interface
                        netstat = {}
                        netstat['interface_name'] = net
                        netstat['time_since_update'] = time_since_update
                        netstat['cumulative_rx'] = network_new[net].bytes_recv
                        netstat['rx'] = (network_new[net].bytes_recv -
                                         self.network_old[net].bytes_recv)
                        netstat['cumulative_tx'] = network_new[net].bytes_sent
                        netstat['tx'] = (network_new[net].bytes_sent -
                                         self.network_old[net].bytes_sent)
                        netstat['cumulative_cx'] = (netstat['cumulative_rx'] +
                                                    netstat['cumulative_tx'])
                        netstat['cx'] = netstat['rx'] + netstat['tx']
                    except KeyError:
                        continue
                    else:
                        self.stats.append(netstat)

                # Save stats to compute next bitrate
                self.network_old = network_new

        elif self.get_input() == 'snmp':
            # Update stats using SNMP

            # SNMP bulk command to get all network interfaces in one shot
            # (snmp_oid is assumed to be a module-level OID table, not shown here)
            netiocounters = self.set_stats_snmp(snmp_oid=snmp_oid, bulk=True)

            # Previous network interface stats are stored in the network_old variable
            if not hasattr(self, 'network_old'):
                # First call, we init the network_old var
                try:
                    self.network_old = netiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # See description in the 'local' block
                time_since_update = getTimeSinceLastUpdate('net')

                # Loop over interfaces
                network_new = netiocounters

                for net in network_new:
                    try:
                        # Try necessary to manage dynamic network interface
                        netstat = {}
                        netstat['interface_name'] = net
                        netstat['time_since_update'] = time_since_update
                        netstat['cumulative_rx'] = float(network_new[net]['cumulative_rx'])
                        netstat['rx'] = (float(network_new[net]['cumulative_rx']) -
                                         float(self.network_old[net]['cumulative_rx']))
                        netstat['cumulative_tx'] = float(network_new[net]['cumulative_tx'])
                        netstat['tx'] = (float(network_new[net]['cumulative_tx']) -
                                         float(self.network_old[net]['cumulative_tx']))
                        netstat['cumulative_cx'] = (netstat['cumulative_rx'] +
                                                    netstat['cumulative_tx'])
                        netstat['cx'] = netstat['rx'] + netstat['tx']
                    except KeyError:
                        continue
                    else:
                        self.stats.append(netstat)

                # Save stats to compute next bitrate
                self.network_old = network_new

        return self.stats
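
The same snapshot/diff pattern as in the disk example, applied per interface. A minimal standalone sketch with a fixed one-second interval in place of getTimeSinceLastUpdate:

    import time

    import psutil

    old = psutil.net_io_counters(pernic=True)
    time.sleep(1.0)
    new = psutil.net_io_counters(pernic=True)

    for nic in new:
        if nic not in old:
            continue  # interface appeared between the two snapshots
        rx = new[nic].bytes_recv - old[nic].bytes_recv
        tx = new[nic].bytes_sent - old[nic].bytes_sent
        print('{0}: rx {1} B/s, tx {2} B/s, cx {3} B/s'.format(nic, rx, tx, rx + tx))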
Example #6
    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.processcount = {
            'total': 0,
            'running': 0,
            'sleeping': 0,
            'thread': 0
        }

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only the mandatory stats (sort keys)
        processdict = {}
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if (self.no_kernel_threads and (not is_windows)
                    and is_kernel_thread(proc)):
                continue

            # If self.get_max_processes() is None: only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(
                proc,
                mandatory_stats=True,
                standard_stats=self.get_max_processes() is None)
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline'])
                             and self.is_filtered(s['name'])):
                continue
            # Ok add the process to the list
            processdict[proc] = s
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            if (is_bsd and processdict[proc]['name'] == 'idle' or is_windows
                    and processdict[proc]['name'] == 'System Idle Process'
                    or is_mac and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            self.process_tree = ProcessTreeNode.build_tree(
                processdict, self.getsortkey(), self.no_kernel_threads)

            for i, node in enumerate(self.process_tree):
                # Only retrieve stats for visible processes (get_max_processes)
                if (self.get_max_processes()
                        is not None) and (i >= self.get_max_processes()):
                    break

                # add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)

                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update

        else:
            # Process optimization
            # Only retrieve stats for visible processes (get_max_processes)
            if self.get_max_processes() is not None:
                # Sort the internal dict and cut the top N (Return a list of tuple)
                # tuple=key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(processdict.items(),
                                         key=lambda x: x[1][self.getsortkey()],
                                         reverse=True)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by %s (%s)" %
                                 (self.getsortkey(), e))
                    logger.error("%s" % str(processdict.items()[0]))
                    # Fallback to all process (issue #423)
                    processloop = processdict.items()
                    first = False
                else:
                    processloop = processiter[0:self.get_max_processes()]
                    first = True
            else:
                # Get all processes stats
                processloop = processdict.items()
                first = False
            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.get_max_processes() is not None:
                    # Update with standard stats
                    # and extended stats but only for TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()
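
The sort-and-cut step in the non-tree branch is easy to test in isolation. A sketch with plain integer keys standing in for psutil.Process objects (the dict values keep the stats-dict shape used above):

    processdict = {
        1: {'name': 'a', 'cpu_percent': 5.0},
        2: {'name': 'b', 'cpu_percent': 80.0},
        3: {'name': 'c', 'cpu_percent': 20.0},
    }
    max_processes = 2
    sort_key = 'cpu_percent'

    try:
        processiter = sorted(processdict.items(),
                             key=lambda x: x[1][sort_key],
                             reverse=True)
    except (KeyError, TypeError):
        processloop = list(processdict.items())  # fallback: keep everything
    else:
        processloop = processiter[0:max_processes]

    print([stats['name'] for _, stats in processloop])  # ['b', 'c']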
Example #7
    def update(self):
        """Update network stats using the input method.

        Stats is a list of dict (one dict per interface)
        """
        # Reset stats
        self.reset()

        if self.get_input() == 'local':
            # Update stats using the standard system lib

            # Grab network interface stats using the psutil net_io_counters method
            try:
                netiocounters = psutil.net_io_counters(pernic=True)
            except UnicodeDecodeError:
                return self.stats

            # Previous network interface stats are stored in the network_old variable
            if not hasattr(self, 'network_old'):
                # First call, we init the network_old var
                try:
                    self.network_old = netiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # By storing time data we enable Rx/s and Tx/s calculations in the
                # XML/RPC API, which would otherwise be overly difficult work
                # for users of the API
                time_since_update = getTimeSinceLastUpdate('net')

                # Loop over interfaces
                network_new = netiocounters
                for net in network_new:
                    try:
                        # Try necessary to manage dynamic network interface
                        netstat = {}
                        netstat['interface_name'] = net
                        netstat['time_since_update'] = time_since_update
                        netstat['cumulative_rx'] = network_new[net].bytes_recv
                        netstat['rx'] = (network_new[net].bytes_recv -
                                         self.network_old[net].bytes_recv)
                        netstat['cumulative_tx'] = network_new[net].bytes_sent
                        netstat['tx'] = (network_new[net].bytes_sent -
                                         self.network_old[net].bytes_sent)
                        netstat['cumulative_cx'] = (netstat['cumulative_rx'] +
                                                    netstat['cumulative_tx'])
                        netstat['cx'] = netstat['rx'] + netstat['tx']
                    except KeyError:
                        continue
                    else:
                        self.stats.append(netstat)

                # Save stats to compute next bitrate
                self.network_old = network_new

        elif self.get_input() == 'snmp':
            # Update stats using SNMP

            # SNMP bulk command to get all network interface in one shot
            try:
                netiocounters = self.set_stats_snmp(
                    snmp_oid=snmp_oid[self.get_short_system_name()], bulk=True)
            except KeyError:
                netiocounters = self.set_stats_snmp(
                    snmp_oid=snmp_oid['default'], bulk=True)

            # Previous network interface stats are stored in the network_old variable
            if not hasattr(self, 'network_old'):
                # First call, we init the network_old var
                try:
                    self.network_old = netiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # See description in the 'local' block
                time_since_update = getTimeSinceLastUpdate('net')

                # Loop over interfaces
                network_new = netiocounters

                for net in network_new:
                    try:
                        # Try necessary to manage dynamic network interface
                        netstat = {}
                        # Windows: a trick is needed to convert HEX to TXT
                        # http://blogs.technet.com/b/networking/archive/2009/12/18/how-to-query-the-list-of-network-interfaces-using-snmp-via-the-ifdescr-counter.aspx
                        if self.get_short_system_name() == 'windows':
                            try:
                                netstat['interface_name'] = str(
                                    base64.b16decode(net[2:-2].upper()))
                            except TypeError:
                                netstat['interface_name'] = net
                        else:
                            netstat['interface_name'] = net
                        netstat['time_since_update'] = time_since_update
                        netstat['cumulative_rx'] = float(
                            network_new[net]['cumulative_rx'])
                        netstat['rx'] = (
                            float(network_new[net]['cumulative_rx']) -
                            float(self.network_old[net]['cumulative_rx']))
                        netstat['cumulative_tx'] = float(
                            network_new[net]['cumulative_tx'])
                        netstat['tx'] = (
                            float(network_new[net]['cumulative_tx']) -
                            float(self.network_old[net]['cumulative_tx']))
                        netstat['cumulative_cx'] = (netstat['cumulative_rx'] +
                                                    netstat['cumulative_tx'])
                        netstat['cx'] = netstat['rx'] + netstat['tx']
                    except KeyError:
                        continue
                    else:
                        self.stats.append(netstat)

                # Save stats to compute next bitrate
                self.network_old = network_new

        return self.stats
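
The Windows ifDescr decoding above can be tried on its own. A sketch with a made-up raw value; the assumption (inferred from the slice net[2:-2]) is that the SNMP value carries two framing characters on each end around the hex payload:

    import base64

    raw = '0x4C6F6F706261636B20496E74657266616365FF'  # hypothetical ifDescr value
    name = base64.b16decode(raw[2:-2].upper()).decode('ascii')
    print(name)  # Loopback Interface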
Example #8
    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only the mandatory stats (sort keys)
        processdict = {}
        for proc in psutil.process_iter():
            # If self.get_max_processes() is None: only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(proc, 
                                         mandatory_stats=True, 
                                         standard_stats=self.get_max_processes() is None)
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
                continue
            # Ok add the process to the list
            processdict[proc] = s
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            if (is_bsd and processdict[proc]['name'] == 'idle' or
                is_windows and processdict[proc]['name'] == 'System Idle Process' or
                is_mac and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                self.processcount[str(proc.status())] = 1
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self.get_max_processes() is not None:
            # Sort the internal dict and cut the top N (Return a list of tuple)
            # tuple=key (proc), dict (returned by __get_process_stats)
            processiter = sorted(processdict.items(), key=lambda x: x[1][self.getsortkey()], reverse=True)
            first = True
            for i in processiter[0:self.get_max_processes()]:
                # Already existing mandatory stats
                procstat = i[1]
                # Update with standard stats
                # and extended stats but only for TOP (first) process
                s = self.__get_process_stats(i[0], 
                                             mandatory_stats=False, 
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False
        else:
            # Get all the processes
            for i in processdict.items():
                # Already existing mandatory and standard stats
                procstat = i[1]
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()
Example #9
    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only the mandatory stats (sort keys)
        processdict = {}
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if self.no_kernel_threads and not is_windows and is_kernel_thread(proc):
                continue

            # If self.max_processes is None: only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            try:
                s = self.__get_process_stats(proc, 
                                             mandatory_stats=True, 
                                             standard_stats=self.get_max_processes() is None)
            except psutil.NoSuchProcess:
                # Can happen if process no longer exists
                continue
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
                continue
            # Ok add the process to the list
            processdict[proc] = s
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            if (is_bsd and processdict[proc]['name'] == 'idle' or
                    is_windows and processdict[proc]['name'] == 'System Idle Process' or
                    is_mac and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads)

            for i, node in enumerate(self.process_tree):
                # Only retrieve stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break

                # add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)

                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update

        else:
            # Process optimization
            # Only retrieve stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N (Return a list of tuple)
                # tuple=key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(processdict.items(),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {0}: {1}".format(self.sort_key, e))
                    logger.error("%s" % str(processdict.items()[0]))
                    # Fallback to all process (issue #423)
                    processloop = processdict.items()
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    first = True
            else:
                # Get all processes stats
                processloop = processdict.items()
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats but only for TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all-processes list used by the monitored list
        self.allprocesslist = list(processdict.values())

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()
Example #10
    def update(self):
        """Update network stats using the input method.

        Stats is a list of dict (one dict per interface)
        """
        # Reset stats
        self.reset()

        if self.get_input() == 'local':
            # Update stats using the standard system lib

            # Grab network interface stats using the psutil net_io_counters method
            try:
                netiocounters = psutil.net_io_counters(pernic=True)
            except UnicodeDecodeError:
                return self.stats

            # Previous network interface stats are stored in the network_old variable
            if not hasattr(self, 'network_old'):
                # First call, we init the network_old var
                try:
                    self.network_old = netiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # By storing time data we enable Rx/s and Tx/s calculations in the
                # XML/RPC API, which would otherwise be overly difficult work
                # for users of the API
                time_since_update = getTimeSinceLastUpdate('net')

                # Loop over interfaces
                network_new = netiocounters
                for net in network_new:
                    try:
                        # Try necessary to manage dynamic network interface
                        netstat = {}
                        netstat['interface_name'] = net
                        netstat['time_since_update'] = time_since_update
                        netstat['cumulative_rx'] = network_new[net].bytes_recv
                        netstat['rx'] = (network_new[net].bytes_recv -
                                         self.network_old[net].bytes_recv)
                        netstat['cumulative_tx'] = network_new[net].bytes_sent
                        netstat['tx'] = (network_new[net].bytes_sent -
                                         self.network_old[net].bytes_sent)
                        netstat['cumulative_cx'] = (netstat['cumulative_rx'] +
                                                    netstat['cumulative_tx'])
                        netstat['cx'] = netstat['rx'] + netstat['tx']
                    except KeyError:
                        continue
                    else:
                        netstat['key'] = self.get_key()
                        self.stats.append(netstat)

                # Save stats to compute next bitrate
                self.network_old = network_new

        elif self.get_input() == 'snmp':
            # Update stats using SNMP

            # SNMP bulk command to get all network interface in one shot
            try:
                netiocounters = self.set_stats_snmp(snmp_oid=snmp_oid[self.get_short_system_name()],
                                                    bulk=True)
            except KeyError:
                netiocounters = self.set_stats_snmp(snmp_oid=snmp_oid['default'],
                                                    bulk=True)

            # Previous network interface stats are stored in the network_old variable
            if not hasattr(self, 'network_old'):
                # First call, we init the network_old var
                try:
                    self.network_old = netiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # See description in the 'local' block
                time_since_update = getTimeSinceLastUpdate('net')

                # Loop over interfaces
                network_new = netiocounters

                for net in network_new:
                    try:
                        # Try necessary to manage dynamic network interface
                        netstat = {}
                        # Windows: a trick is needed to convert HEX to TXT
                        # http://blogs.technet.com/b/networking/archive/2009/12/18/how-to-query-the-list-of-network-interfaces-using-snmp-via-the-ifdescr-counter.aspx
                        if self.get_short_system_name() == 'windows':
                            try:
                                netstat['interface_name'] = str(base64.b16decode(net[2:-2].upper()))
                            except TypeError:
                                netstat['interface_name'] = net
                        else:
                            netstat['interface_name'] = net
                        netstat['time_since_update'] = time_since_update
                        netstat['cumulative_rx'] = float(network_new[net]['cumulative_rx'])
                        netstat['rx'] = (float(network_new[net]['cumulative_rx']) -
                                         float(self.network_old[net]['cumulative_rx']))
                        netstat['cumulative_tx'] = float(network_new[net]['cumulative_tx'])
                        netstat['tx'] = (float(network_new[net]['cumulative_tx']) -
                                         float(self.network_old[net]['cumulative_tx']))
                        netstat['cumulative_cx'] = (netstat['cumulative_rx'] +
                                                    netstat['cumulative_tx'])
                        netstat['cx'] = netstat['rx'] + netstat['tx']
                    except KeyError:
                        continue
                    else:
                        netstat['key'] = self.get_key()
                        self.stats.append(netstat)

                # Save stats to compute next bitrate
                self.network_old = network_new

        # Update the history list
        self.update_stats_history(self.get_key())

        # Update the view
        self.update_views()

        return self.stats
Example #11
    def get_docker_io(self, container_id, all_stats):
        """Return the container IO usage using the Docker API (v1.0 or higher).

        Input: container_id is the full container id.
        Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}
        with:
            time_since_update: number of seconds elapsed since the last grab
            ior: Number of bytes read
            iow: Number of bytes written
        """
        # Init the returned dict
        io_new = {}

        # Read the ior/iow stats (in bytes)
        try:
            iocounters = all_stats["blkio_stats"]
        except KeyError as e:
            # all_stats does not contain IO information
            logger.debug("Can not grab block IO usage for container {0} ({1})".format(container_id, e))
            # No fallback available...
            return io_new

        # Previous io interface stats are stored in the io_old variable
        if not hasattr(self, 'iocounters_old'):
            # First call, we init the io_old var
            self.iocounters_old = {}
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.iocounters_old:
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable IoR/s and IoW/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                # Read IOR and IOW value in the structure list of dict
                ior = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
                ior_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
            except (IndexError, KeyError) as e:
                # all_stats does not contain IO information
                logger.debug("Can not grab block IO usage for container {0} ({1})".format(container_id, e))
            else:
                io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{0}'.format(container_id))
                io_new['ior'] = ior - ior_old
                io_new['iow'] = iow - iow_old
                io_new['cumulative_ior'] = ior
                io_new['cumulative_iow'] = iow

                # Save stats to compute next bitrate
                self.iocounters_old[container_id] = iocounters

        # Return the stats
        return io_new
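
The list-of-dicts lookup used for the blkio counters can be exercised with a hand-made payload in the shape the method above expects:

    iocounters = {
        'io_service_bytes_recursive': [
            {'op': 'Read', 'value': 4096},
            {'op': 'Write', 'value': 8192},
        ]
    }

    ior = [i for i in iocounters['io_service_bytes_recursive']
           if i['op'] == 'Read'][0]['value']
    iow = [i for i in iocounters['io_service_bytes_recursive']
           if i['op'] == 'Write'][0]['value']
    print(ior, iow)  # 4096 8192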
Example #12
    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.processcount = {
            'total': 0,
            'running': 0,
            'sleeping': 0,
            'thread': 0
        }

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only the mandatory stats (sort keys)
        processdict = {}
        for proc in psutil.process_iter():
            # If self.get_max_processes() is None: only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(
                proc,
                mandatory_stats=True,
                standard_stats=self.get_max_processes() is None)
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline'])
                             and self.is_filtered(s['name'])):
                continue
            # Ok add the process to the list
            processdict[proc] = s
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            if (is_bsd and processdict[proc]['name'] == 'idle' or is_windows
                    and processdict[proc]['name'] == 'System Idle Process'
                    or is_mac and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                self.processcount[str(proc.status())] = 1
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self.get_max_processes() is not None:
            # Sort the internal dict and cut the top N (Return a list of tuple)
            # tuple=key (proc), dict (returned by __get_process_stats)
            processiter = sorted(processdict.items(),
                                 key=lambda x: x[1][self.getsortkey()],
                                 reverse=True)
            first = True
            for i in processiter[0:self.get_max_processes()]:
                # Already existing mandatory stats
                procstat = i[1]
                # Update with standard stats
                # and extended stats but only for TOP (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False
        else:
            # Get all the processes
            for i in processdict.items():
                # Already existing mandatory and standard stats
                procstat = i[1]
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()
Example #13
    def get_docker_io(self, container_id, all_stats):
        """Return the container IO usage using the Docker API (v1.0 or higher).

        Input: container_id is the full container id.
        Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}
        with:
            time_since_update: number of seconds elapsed since the last grab
            ior: Number of bytes read
            iow: Number of bytes written
        """
        # Init the returned dict
        io_new = {}

        # Read the ior/iow stats (in bytes)
        try:
            iocounters = all_stats["blkio_stats"]
        except KeyError as e:
            # all_stats does not contain IO information
            logger.debug("Can not grab block IO usage for container {0} ({1})".format(container_id, e))
            # No fallback available...
            return io_new

        # Previous io interface stats are stored in the io_old variable
        if not hasattr(self, 'iocounters_old'):
            # First call, we init the io_old var
            self.iocounters_old = {}
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.iocounters_old:
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable IoR/s and IoW/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                # Read IOR and IOW value in the structure list of dict
                ior = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
                ior_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
            except (IndexError, KeyError) as e:
                # all_stats does not contain IO information
                logger.debug("Can not grab block IO usage for container {0} ({1})".format(container_id, e))
            else:
                io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{0}'.format(container_id))
                io_new['ior'] = ior - ior_old
                io_new['iow'] = iow - iow_old
                io_new['cumulative_ior'] = ior
                io_new['cumulative_iow'] = iow

                # Save stats to compute next bitrate
                self.iocounters_old[container_id] = iocounters

        # Return the stats
        return io_new