Example #1
    def update(self):
        """Update IP stats using the input method.

        Stats is a dict.
        """
        # Reset stats
        self.reset()

        if self.input_method == 'local' and netifaces_tag:
            # Update stats using the netifaces lib
            try:
                default_gw = netifaces.gateways()['default'][netifaces.AF_INET]
            except KeyError:
                logger.debug("Can not grab the default gateway")
            else:
                try:
                    self.stats['address'] = netifaces.ifaddresses(
                        default_gw[1])[netifaces.AF_INET][0]['addr']
                    self.stats['mask'] = netifaces.ifaddresses(
                        default_gw[1])[netifaces.AF_INET][0]['netmask']
                    self.stats['mask_cidr'] = self.ip_to_cidr(
                        self.stats['mask'])
                    self.stats['gateway'] = netifaces.gateways()['default'][
                        netifaces.AF_INET][0]
                except KeyError as e:
                    logger.debug("Can not grab IP information (%s)".format(e))

        elif self.input_method == 'snmp':
            # Not implemented yet
            pass

        # Update the view
        self.update_views()

        return self.stats
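The ip_to_cidr helper called above is not shown in this example. A minimal standalone sketch, assuming it simply counts the set bits of a dotted-quad netmask (e.g. '255.255.255.0' -> 24):

def ip_to_cidr(ip):
    """Convert a dotted-quad netmask to its CIDR prefix length (hypothetical sketch).

    Example: '255.255.255.0' -> 24
    """
    return sum(bin(int(octet)).count('1') for octet in ip.split('.'))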
Example #2
 def reset_stats_history(self):
     """Reset the stats history (dict of list)."""
     if self.args is not None and self.args.enable_history and self.get_items_history_list() is not None:
         reset_list = [i['name'] for i in self.get_items_history_list()]
         logger.debug("Reset history for plugin {0} (items: {0})".format(
             self.plugin_name, reset_list))
         self.stats_history = {}
Example #3
 def init_stats_history(self):
     """Init the stats history (dict of list)."""
     ret = None
     if self.args is not None and self.args.enable_history and self.get_items_history_list() is not None:
         init_list = [i['name'] for i in self.get_items_history_list()]
         logger.debug("Stats history activated for plugin {0} (items: {0})".format(
             self.plugin_name, init_list))
         ret = {}
     return ret
Example #4
    def __init__(self, config):
        """Init the monitoring list from the configuration file, if it exists."""
        self.config = config

        if self.config is not None and self.config.has_section('monitor'):
            # Process monitoring list
            logger.debug("Monitor list configuration detected")
            self.__set_monitor_list('monitor', 'list')
        else:
            self.__monitor_list = []
Example #5
 def __init__(self):
     """Init batteries stats."""
     try:
         self.bat = batinfo.batteries()
         self.initok = True
         self.bat_list = []
         self.update()
     except Exception as e:
         self.initok = False
         logger.debug("Cannot init unicongrabbat class (%s)" % e)
Example #6
    def get_docker_cpu(self, container_id, all_stats):
        """Return the container CPU usage.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'total': 1.49}
        """
        cpu_new = {}
        ret = {'total': 0.0}

        # Read the stats
        # For each container, you will find a pseudo-file cpuacct.stat,
        # containing the CPU usage accumulated by the processes of the container.
        # Those times are expressed in ticks of 1/USER_HZ of a second.
        # On x86 systems, USER_HZ is 100.
        try:
            cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
            cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
            cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'])
        except KeyError as e:
            # all_stats does not contain CPU information
            logger.debug("Cannot grab CPU usage for container {0} ({1}). Trying the fallback method.".format(container_id, e))
            # Fall back to the old grab method
            ret = self.get_docker_cpu_old(container_id)
            # Get the user ticks
            ticks = self.get_user_ticks()
            for k in ret.keys():
                ret[k] = float(ret[k]) / ticks
        else:
            # Previous CPU stats stored in the cpu_old variable
            if not hasattr(self, 'cpu_old'):
                # First call, we init the cpu_old variable
                self.cpu_old = {}
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass

            if container_id not in self.cpu_old:
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass
            else:
                # Compute the CPU usage from the deltas: (container delta / system delta) * nb cores * 100
                cpu_delta = float(cpu_new['total'] - self.cpu_old[container_id]['total'])
                system_delta = float(cpu_new['system'] - self.cpu_old[container_id]['system'])
                if cpu_delta > 0.0 and system_delta > 0.0:
                    ret['total'] = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100

                # Save stats to compute next stats
                self.cpu_old[container_id] = cpu_new

        # Return the stats
        return ret
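To illustrate the formula used in the else branch, here is a small standalone sketch with made-up counter values showing how the percentage is derived from two consecutive samples:

# Standalone sketch of the CPU percentage computation above (made-up counters).
cpu_old = {'total': 1000000, 'system': 100000000, 'nb_core': 4}
cpu_new = {'total': 1600000, 'system': 140000000, 'nb_core': 4}

cpu_delta = float(cpu_new['total'] - cpu_old['total'])       # 600000.0
system_delta = float(cpu_new['system'] - cpu_old['system'])  # 40000000.0
if cpu_delta > 0.0 and system_delta > 0.0:
    # (600000 / 40000000) * 4 cores * 100 = 6.0 -> the container used ~6% CPU
    total = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100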
Example #7
 def load_limits(self, config):
     """Load limits from the configuration file, if it exists."""
     if (hasattr(config, 'has_section') and
             config.has_section(self.plugin_name)):
         for level, _ in config.items(self.plugin_name):
             # Read limits
             limit = '_'.join([self.plugin_name, level])
             try:
                 self._limits[limit] = config.get_float_value(self.plugin_name, level)
             except ValueError:
                 self._limits[limit] = config.get_value(self.plugin_name, level).split(",")
             logger.debug("Load limit: {0} = {1}".format(limit, self._limits[limit]))
Example #8
    def export(self, input_stats=None):
        """Export all the stats.

        Each export module is run in a dedicated thread.
        """
        # threads = []
        input_stats = input_stats or {}

        for e in self._exports:
            logger.debug("Export stats using the %s module" % e)
            thread = threading.Thread(target=self._exports[e].update,
                                      args=(input_stats, ))
            # threads.append(thread)
            thread.start()
Example #9
 def set_plugins(self, input_plugins):
     """Set the plugin list according to the server."""
     header = "unicon_"
     for item in input_plugins:
         # Import the plugin
         plugin = __import__(header + item)
         # Add the plugin to the dictionary
         # The key is the plugin name
         # for example, the file unicon_xxx.py
         # generate self._plugins_list["xxx"] = ...
         logger.debug("Server uses {0} plugin".format(item))
         self._plugins[item] = plugin.Plugin()
     # Restore the system path
     sys.path = sys_path
Example #10
 def process_filter(self, value):
     """Set the process filter."""
     logger.info("Set process filter to {0}".format(value))
     self._process_filter = value
     if value is not None:
         try:
             self._process_filter_re = re.compile(value)
             logger.debug("Process filter regex compilation OK: {0}".format(
                 self.process_filter))
         except Exception:
             logger.error(
                 "Cannot compile process filter regex: {0}".format(value))
             self._process_filter_re = None
     else:
         self._process_filter_re = None
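A short hypothetical usage sketch of the compiled filter stored in _process_filter_re:

# Hypothetical sketch: matching process names against the compiled filter regex.
import re

pattern = re.compile('fire(fox|bird)')    # same compilation as in process_filter()
for name in ('firefox', 'firebird', 'thunderbird'):
    print('{0}: {1}'.format(name, pattern.match(name) is not None))
# firefox: True, firebird: True, thunderbird: False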
Example #11
 def get(self, sensor_type='temperature_core'):
     """Get sensors list."""
     self.__update__()
     if sensor_type == 'temperature_core':
         ret = [
             s for s in self.sensors_list if s['unit'] == SENSOR_TEMP_UNIT
         ]
     elif sensor_type == 'fan_speed':
         ret = [
             s for s in self.sensors_list if s['unit'] == SENSOR_FAN_UNIT
         ]
     else:
         # Unknown type
         logger.debug("Unknown sensor type %s" % sensor_type)
         ret = []
     return ret
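A minimal sketch of what the filtering above assumes, namely that every sensors_list entry is a dict carrying a 'unit' field (the values of SENSOR_TEMP_UNIT and SENSOR_FAN_UNIT below are assumptions; the real constants are defined elsewhere):

# Hedged sketch: hypothetical sensors_list entries and the temperature filter.
SENSOR_TEMP_UNIT = 'C'    # assumed value
SENSOR_FAN_UNIT = 'R'     # assumed value
sensors_list = [
    {'label': 'Core 0', 'value': 45, 'unit': SENSOR_TEMP_UNIT},
    {'label': 'fan1', 'value': 1200, 'unit': SENSOR_FAN_UNIT},
]
temperatures = [s for s in sensors_list if s['unit'] == SENSOR_TEMP_UNIT]
# -> [{'label': 'Core 0', 'value': 45, 'unit': 'C'}]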
Example #12
    def get_docker_network(self, container_id, all_stats):
        """Return the container network usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
        """
        # Init the returned dict
        network_new = {}

        # Read the rx/tx stats (in bytes)
        try:
            netiocounters = all_stats["network"]
        except KeyError as e:
            # all_stats does not contain NETWORK information
            logger.debug("Cannot grab NET usage for container {0} ({1})".format(container_id, e))
            # No fallback available...
            return network_new

        # Previous network interface stats are stored in the netiocounters_old variable
        if not hasattr(self, 'netiocounters_old'):
            # First call, we init the netiocounters_old variable
            self.netiocounters_old = {}
            try:
                self.netiocounters_old[container_id] = netiocounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.netiocounters_old:
            try:
                self.netiocounters_old[container_id] = netiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
            network_new['rx'] = netiocounters["rx_bytes"] - self.netiocounters_old[container_id]["rx_bytes"]
            network_new['tx'] = netiocounters["tx_bytes"] - self.netiocounters_old[container_id]["tx_bytes"]
            network_new['cumulative_rx'] = netiocounters["rx_bytes"]
            network_new['cumulative_tx'] = netiocounters["tx_bytes"]

            # Save stats to compute next bitrate
            self.netiocounters_old[container_id] = netiocounters

        # Return the stats
        return network_new
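The 'rx' and 'tx' values returned above are raw byte deltas. A hedged sketch of how a consumer could turn them into per-second rates, assuming time_since_update is expressed in seconds:

# Hedged sketch: derive byte rates from the dict returned by get_docker_network().
net = {'time_since_update': 3.0, 'rx': 3000, 'tx': 1500}   # sample values
rx_per_second = net['rx'] / net['time_since_update']       # 1000.0 bytes/s
tx_per_second = net['tx'] / net['time_since_update']       # 500.0 bytes/s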
Example #13
    def connect(self, version=None):
        """Connect to the Docker server."""
        # Init connection to the Docker API
        try:
            if version is None:
                ret = docker.Client(base_url='unix://var/run/docker.sock')
            else:
                ret = docker.Client(base_url='unix://var/run/docker.sock',
                                    version=version)
        except NameError:
            # docker lib not found
            return None
        try:
            ret.version()
        except requests.exceptions.ConnectionError as e:
            # Connection error (Docker not detected)
            # Keep this message in debug mode
            logger.debug("Can't connect to the Docker server (%s)" % e)
            return None
        except docker.errors.APIError as e:
            if version is None:
                # API error (Version mismatch ?)
                logger.debug("Docker API error (%s)" % e)
                # Try the connection with the server version
                version = re.search(r'server\:\ (.*)\)\".*\)', str(e))
                if version:
                    logger.debug("Try connection with Docker API version %s" % version.group(1))
                    ret = self.connect(version=version.group(1))
                else:
                    logger.debug("Can not retreive Docker server version")
                    ret = None
            else:
                # API error
                logger.error("Docker API error (%s)" % e)
                ret = None
        except Exception as e:
            # Other exceptions...
            # Connection error (Docker not detected)
            logger.error("Can't connect to the Docker server (%s)" % e)
            ret = None

        # Log an info if Docker plugin is disabled
        if ret is None:
            logger.debug("Docker plugin is disable because an error has been detected")

        return ret
Example #14
    def fetch(self):
        """Fetch the data from hddtemp daemon."""
        # Taking care of sudden deaths/stops of hddtemp daemon
        try:
            sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sck.connect((self.host, self.port))
            data = sck.recv(4096)
            sck.close()
        except socket.error as e:
            logger.warning(
                "Can not connect to an HDDtemp server ({0}:{1} => {2})".format(
                    self.host, self.port, e))
            logger.debug(
                "Disable the HDDtemp module. Use the --disable-hddtemp to hide the previous message."
            )
            self.args.disable_hddtemp = True
            data = ""

        return data
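The payload returned by the hddtemp daemon is a single pipe-delimited string. A hedged parsing sketch, assuming the classic |device|model|temperature|unit| wire format:

# Hedged sketch: parse a hddtemp payload such as the sample string below.
data = "|/dev/sda|WDC WD2500JS|44|C||/dev/sdb|ST3250410AS|41|C|"
fields = data.split('|')[1:-1]       # drop the empty leading/trailing items
disks = []
for i in range(0, len(fields), 5):   # 4 fields per disk + the '' separator
    device, model, temperature, unit = fields[i:i + 4]
    disks.append({'device': device, 'label': model,
                  'value': int(temperature), 'unit': unit})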
Example #15
    def __init__(self, config=None, args=None):
        # Quiet mode
        self._quiet = args.quiet
        self.refresh_time = args.time

        # Init stats
        self.stats = UniconStats(config=config, args=args)

        # If process extended stats is disabled by user
        if not args.enable_process_extended:
            logger.debug("Extended stats for top process are disabled")
            unicon_processes.disable_extended()
        else:
            logger.debug("Extended stats for top process are enabled")
            unicon_processes.enable_extended()

        # Manage the optional process filter
        if args.process_filter is not None:
            unicon_processes.process_filter = args.process_filter

        if (not is_windows) and args.no_kernel_threads:
            # Ignore kernel threads in process list
            unicon_processes.disable_kernel_threads()

        try:
            if args.process_tree:
                # Enable process tree view
                unicon_processes.enable_tree()
        except AttributeError:
            pass

        # Initial system information update
        self.stats.update()

        if self.quiet:
            logger.info("Quiet mode is ON: Nothing will be displayed")
            # In quiet mode, nothing is displayed
            unicon_processes.max_processes = 0
        else:
            # The default number of processes to display is set to 50
            unicon_processes.max_processes = 50
Example #16
    def get_docker_memory(self, container_id, all_stats):
        """Return the container MEMORY.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'rss': 1015808, 'cache': 356352,  'usage': ..., 'max_usage': ...}
        """
        ret = {}
        # Read the stats
        try:
            ret['rss'] = all_stats['memory_stats']['stats']['rss']
            ret['cache'] = all_stats['memory_stats']['stats']['cache']
            ret['usage'] = all_stats['memory_stats']['usage']
            ret['max_usage'] = all_stats['memory_stats']['max_usage']
        except KeyError as e:
            # all_stats does not contain MEM information
            logger.debug("Cannot grab MEM usage for container {0} ({1}). Trying the fallback method.".format(container_id, e))
            # Fall back to the old grab method
            ret = self.get_docker_memory_old(container_id)
        # Return the stats
        return ret
Example #17
    def run(self, stat_name, criticity, commands, mustache_dict=None):
        """Run the commands (in background).

        - stat_name: plugin name (+ header)
        - criticity: criticity of the trigger
        - commands: a list of command lines with optional {{mustache}} placeholders
        - mustache_dict: plugin stats (can be used within {{mustache}})

        Return True if the commands have been run.
        """
        if self.get(stat_name) == criticity:
            # Action already executed => Exit
            return False

        logger.debug("Run action {0} for {1} ({2}) with stats {3}".format(
            commands, stat_name, criticity, mustache_dict))

        # Run all actions in background
        for cmd in commands:
            # Replace {{arg}} placeholders with values from the dict (thanks to Mustache)
            if pystache_tag:
                cmd_full = pystache.render(cmd, mustache_dict)
            else:
                cmd_full = cmd
            # Execute the action
            logger.info("Action triggered for {0} ({1}): {2}".format(
                stat_name, criticity, cmd_full))
            logger.debug(
                "Stats value for the trigger: {0}".format(mustache_dict))
            try:
                Popen(cmd_full, shell=True)
            except OSError as e:
                logger.error("Can't execute the action ({0})".format(e))

        self.set(stat_name, criticity)

        return True
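A small hedged example of the {{mustache}} substitution performed above; pystache.render() takes a template string and a context dict (the command and stats below are made up):

# Hedged sketch of the {{mustache}} substitution used in run().
import pystache

cmd = "notify-send 'CPU user is {{user}}%'"   # hypothetical action command
mustache_dict = {'user': 87.2}                # hypothetical plugin stats
cmd_full = pystache.render(cmd, mustache_dict)
# -> "notify-send 'CPU user is 87.2%'"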
Example #18
# -*- coding: utf-8 -*-
"""Battery plugin."""

# Import libs
from sysmonitor.core.unicorn_logging import logger
from unicorn_plugin import UnicornPlugin

# Batinfo library (optional; Linux-only)
try:
    import batinfo
except ImportError:
    logger.debug("Batinfo library not found. Unicon cannot grab battery info.")


class Plugin(UnicornPlugin):
    """ battery capacity plugin.

    stats is a list
    """
    def __init__(self, args=None):
        """Init the plugin."""
        UnicornPlugin.__init__(self, args=args)

        # Init the sensor class
        self.unicongrabbat = UniconGrabBat()

        # We do not want to display the stat in a dedicated area
        # The battery capacity is displayed within the sensors plugin
        self.display_curse = False

        # Init stats
Example #19
    def update(self):
        """Update Docker stats using the input method."""
        # Reset stats
        self.reset()

        # Get the current Docker API client
        if not self.docker_client:
            # First time, try to connect to the server
            self.docker_client = self.connect()
            if self.docker_client is None:
                global docker_tag
                docker_tag = False

        # The Docker-py lib is mandatory
        if not docker_tag or (self.args is not None and self.args.disable_docker):
            return self.stats

        if self.input_method == 'local':
            # Update stats
            # Example: {
            #     "KernelVersion": "3.16.4-tinycore64",
            #     "Arch": "amd64",
            #     "ApiVersion": "1.15",
            #     "Version": "1.3.0",
            #     "GitCommit": "c78088f",
            #     "Os": "linux",
            #     "GoVersion": "go1.3.3"
            # }
            self.stats['version'] = self.docker_client.version()
            # Example: [{u'Status': u'Up 36 seconds',
            #            u'Created': 1420378904,
            #            u'Image': u'nginx:1',
            #            u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443},
            #                       {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}],
            #            u'Command': u"nginx -g 'daemon off;'",
            #            u'Names': [u'/webstack_nginx_1'],
            #            u'Id': u'b0da859e84eb4019cf1d965b15e9323006e510352c402d2f442ea632d61faaa5'}]
            self.stats['containers'] = self.docker_client.containers()
            # Get stats for all containers
            for c in self.stats['containers']:
                if not hasattr(self, 'docker_stats'):
                    # Create a dict with all the containers' stats instance
                    self.docker_stats = {}

                # TODO: Find a way to correct this
                # The following optimization is not compatible with the network stats
                # The self.docker_client.stats method should be called every time in order to have the network stats refreshed
                # Nevertheless, if we call it every time, unicon is slow...
                if c['Id'] not in self.docker_stats:
                    # Create the stats instance for the current container
                    try:
                        self.docker_stats[c['Id']] = self.docker_client.stats(c['Id'], decode=True)
                        logger.debug("Create Docker stats object for container {}".format(c['Id']))
                    except Exception as e:
                        # Correct Issue #602
                        logger.error("Can not call Docker stats method {}".format(e))

                # Get the docker stats
                try:
                    # self.docker_stats[c['Id']] = self.docker_client.stats(c['Id'], decode=True)
                    all_stats = next(self.docker_stats[c['Id']])
                except Exception:
                    all_stats = {}

                c['cpu'] = self.get_docker_cpu(c['Id'], all_stats)
                c['memory'] = self.get_docker_memory(c['Id'], all_stats)
                # c['network'] = self.get_docker_network(c['Id'], all_stats)

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass

        return self.stats
Example #20
import numbers
import os
import re

# Import libs
from sysmonitor.core.unicorn_logging import logger
from sysmonitor.core.unicon_timer import getTimeSinceLastUpdate
from unicorn_plugin import UnicornPlugin

# Docker-py library (optional and Linux-only)
# https://github.com/docker/docker-py
try:
    import docker
    import requests
except ImportError as e:
    logger.debug("Docker library not found (%s). unicon cannot grab Docker info." % e)
    docker_tag = False
else:
    docker_tag = True


class Plugin(UnicornPlugin):

    """ Docker plugin.

    stats is a list
    """

    def __init__(self, args=None):
        """Init the plugin."""
        UnicornPlugin.__init__(self, args=args)