Example #1
    def __build_export(self, stats):
        """Build the export lists."""
        export_names = []
        export_values = []

        if isinstance(stats, dict):
            # Stats is a dict
            # Is there a key ?
            if 'key' in iterkeys(stats) and stats['key'] in iterkeys(stats):
                pre_key = '{}.'.format(stats[stats['key']])
            else:
                pre_key = ''
            # Walk through the dict
            for key, value in iteritems(stats):
                if isinstance(value, list):
                    try:
                        value = value[0]
                    except IndexError:
                        value = ''
                if isinstance(value, dict):
                    item_names, item_values = self.__build_export(value)
                    item_names = [pre_key + key.lower() + str(i) for i in item_names]
                    export_names += item_names
                    export_values += item_values
                else:
                    export_names.append(pre_key + key.lower())
                    export_values.append(value)
        elif isinstance(stats, list):
            # Stats is a list (of dict)
            # Recursive loop through the list
            for item in stats:
                item_names, item_values = self.__build_export(item)
                export_names += item_names
                export_values += item_values
        return export_names, export_values
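
Note: these examples rely on the iterkeys / itervalues / iteritems helpers imported from glances' Python 2/3 compatibility layer. A minimal sketch of how such helpers are commonly written follows; the real glances.compat module may differ in signature and behaviour (some call sites above subscript or unpack the result):

    import sys

    if sys.version_info[0] >= 3:
        # Python 3: the dict methods already return lazy view objects.
        def iterkeys(d):
            return iter(d.keys())

        def itervalues(d):
            return iter(d.values())

        def iteritems(d):
            return iter(d.items())
    else:
        # Python 2: use the dedicated iterator methods to avoid building lists.
        def iterkeys(d):
            return d.iterkeys()

        def itervalues(d):
            return d.itervalues()

        def iteritems(d):
            return d.iteritems()
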
Example #2
    def msg_curse(self, args=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist and the display plugin is enabled...
        if not self.stats or args.disable_raid:
            return ret

        # Build the string message
        # Header
        msg = '{:11}'.format('RAID disks')
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = '{:>6}'.format('Used')
        ret.append(self.curse_add_line(msg))
        msg = '{:>6}'.format('Avail')
        ret.append(self.curse_add_line(msg))
        # Data
        arrays = sorted(iterkeys(self.stats))
        for array in arrays:
            # New line
            ret.append(self.curse_new_line())
            # Display the current status
            status = self.raid_alert(self.stats[array]['status'], self.stats[array]['used'], self.stats[array]['available'])
            # Data: RAID type name | disk used | disk available
            array_type = self.stats[array]['type'].upper() if self.stats[array]['type'] is not None else 'UNKNOWN'
            msg = '{:<5}{:>6}'.format(array_type, array)
            ret.append(self.curse_add_line(msg))
            if self.stats[array]['status'] == 'active':
                msg = '{:>6}'.format(self.stats[array]['used'])
                ret.append(self.curse_add_line(msg, status))
                msg = '{:>6}'.format(self.stats[array]['available'])
                ret.append(self.curse_add_line(msg, status))
            elif self.stats[array]['status'] == 'inactive':
                ret.append(self.curse_new_line())
                msg = '└─ Status {}'.format(self.stats[array]['status'])
                ret.append(self.curse_add_line(msg, status))
                components = sorted(iterkeys(self.stats[array]['components']))
                for i, component in enumerate(components):
                    if i == len(components) - 1:
                        tree_char = '└─'
                    else:
                        tree_char = '├─'
                    ret.append(self.curse_new_line())
                    msg = '   {} disk {}: '.format(tree_char, self.stats[array]['components'][component])
                    ret.append(self.curse_add_line(msg))
                    msg = '{}'.format(component)
                    ret.append(self.curse_add_line(msg))
            if self.stats[array]['used'] < self.stats[array]['available']:
                # Display current array configuration
                ret.append(self.curse_new_line())
                msg = '└─ Degraded mode'
                ret.append(self.curse_add_line(msg, status))
                if len(self.stats[array]['config']) < 17:
                    ret.append(self.curse_new_line())
                    msg = '   └─ {}'.format(self.stats[array]['config'].replace('_', 'A'))
                    ret.append(self.curse_add_line(msg))

        return ret
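
For readability, here is a hypothetical example of the self.stats structure this method walks over; the field names are taken from the accesses in the code above, the values are made up:

    # Hypothetical RAID plugin stats: array name -> per-array dict.
    stats = {
        'md0': {
            'status': 'active',      # or 'inactive'
            'type': 'raid1',         # None is rendered as 'UNKNOWN'
            'used': 2,               # disks currently used by the array
            'available': 2,          # disks the array can use
            'components': {'sda1': 0, 'sdb1': 1},
            'config': 'UU',          # mdstat-style slot string
        },
    }
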
Example #3
    def get_stats_snmp(self, bulk=False, snmp_oid=None):
        """Update stats using SNMP.

        If bulk=True, use a bulk request instead of a get request.
        """
        snmp_oid = snmp_oid or {}

        from glances.snmp import GlancesSNMPClient

        # Init the SNMP request
        clientsnmp = GlancesSNMPClient(host=self.args.client,
                                       port=self.args.snmp_port,
                                       version=self.args.snmp_version,
                                       community=self.args.snmp_community)

        # Process the SNMP request
        ret = {}
        if bulk:
            # Bulk request
            snmpresult = clientsnmp.getbulk_by_oid(0, 10,
                                                   itervalues(*snmp_oid))

            if len(snmp_oid) == 1:
                # Bulk command for only one OID
                # Note: the key is the item indexed by the OID result
                for item in snmpresult:
                    if iterkeys(item)[0].startswith(itervalues(snmp_oid)[0]):
                        ret[iterkeys(snmp_oid)[0] + iterkeys(item)[0].split(
                            itervalues(snmp_oid)[0])[1]] = itervalues(item)[0]
            else:
                # Build the internal dict with the SNMP result
                # Note: key is the first item in the snmp_oid
                index = 1
                for item in snmpresult:
                    item_stats = {}
                    item_key = None
                    for key in iterkeys(snmp_oid):
                        oid = snmp_oid[key] + '.' + str(index)
                        if oid in item:
                            if item_key is None:
                                item_key = item[oid]
                            else:
                                item_stats[key] = item[oid]
                    if item_stats:
                        ret[item_key] = item_stats
                    index += 1
        else:
            # Simple get request
            snmpresult = clientsnmp.get_by_oid(itervalues(*snmp_oid))

            # Build the internal dict with the SNMP result
            for key in iterkeys(snmp_oid):
                ret[key] = snmpresult[snmp_oid[key]]

        return ret
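
A short usage sketch for get_stats_snmp(); the snmp_oid mapping below (result key -> OID string) is only illustrative, and plugin stands for any plugin instance that inherits this method:

    # Illustrative OID table; adapt keys and OIDs to the target device.
    snmp_oid = {
        'total': '1.3.6.1.4.1.2021.4.5.0',
        'free': '1.3.6.1.4.1.2021.4.11.0',
    }

    # Simple get request: one value per key, e.g. {'total': '2048', 'free': '512'}.
    stats = plugin.get_stats_snmp(snmp_oid=snmp_oid)

    # Bulk request: walks the rows of an SNMP table instead of single values.
    table = plugin.get_stats_snmp(snmp_oid=snmp_oid, bulk=True)
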
Example #4
    def get_stats_snmp(self, bulk=False, snmp_oid=None):
        """Update stats using SNMP.

        If bulk=True, use a bulk request instead of a get request.
        """
        snmp_oid = snmp_oid or {}

        from glances.snmp import GlancesSNMPClient

        # Init the SNMP request
        clientsnmp = GlancesSNMPClient(host=self.args.client,
                                       port=self.args.snmp_port,
                                       version=self.args.snmp_version,
                                       community=self.args.snmp_community)

        # Process the SNMP request
        ret = {}
        if bulk:
            # Bulk request
            snmpresult = clientsnmp.getbulk_by_oid(0, 10, itervalues(*snmp_oid))

            if len(snmp_oid) == 1:
                # Bulk command for only one OID
                # Note: the key is the item indexed by the OID result
                for item in snmpresult:
                    if iterkeys(item)[0].startswith(itervalues(snmp_oid)[0]):
                        ret[iterkeys(snmp_oid)[0] + iterkeys(item)[0].split(
                            itervalues(snmp_oid)[0])[1]] = itervalues(item)[0]
            else:
                # Build the internal dict with the SNMP result
                # Note: key is the first item in the snmp_oid
                index = 1
                for item in snmpresult:
                    item_stats = {}
                    item_key = None
                    for key in iterkeys(snmp_oid):
                        oid = snmp_oid[key] + '.' + str(index)
                        if oid in item:
                            if item_key is None:
                                item_key = item[oid]
                            else:
                                item_stats[key] = item[oid]
                    if item_stats:
                        ret[item_key] = item_stats
                    index += 1
        else:
            # Simple get request
            snmpresult = clientsnmp.get_by_oid(itervalues(*snmp_oid))

            # Build the internal dict with the SNMP result
            for key in iterkeys(snmp_oid):
                ret[key] = snmpresult[snmp_oid[key]]

        return ret
Example #5
    def update(self, stats):
        """Update stats in the CSV output file."""
        # Get the stats
        all_stats = stats.getAllExportsAsDict(plugin_list=self.plugins_to_export())

        # Init data with timestamp (issue#708)
        if self.first_line:
            csv_header = ['timestamp']
        csv_data = [time.strftime('%Y-%m-%d %H:%M:%S')]

        # Loop over plugins to export
        for plugin in self.plugins_to_export():
            if isinstance(all_stats[plugin], list):
                for stat in all_stats[plugin]:
                    # First line: header
                    if self.first_line:
                        csv_header += ('{}_{}_{}'.format(
                            plugin, self.get_item_key(stat), item) for item in stat)
                    # Other lines: stats
                    csv_data += itervalues(stat)
            elif isinstance(all_stats[plugin], dict):
                # First line: header
                if self.first_line:
                    fieldnames = iterkeys(all_stats[plugin])
                    csv_header += ('{}_{}'.format(plugin, fieldname)
                                   for fieldname in fieldnames)
                # Other lines: stats
                csv_data += itervalues(all_stats[plugin])

        # Export to CSV
        if self.first_line:
            self.writer.writerow(csv_header)
            self.first_line = False
        self.writer.writerow(csv_data)
        self.csv_file.flush()
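
The header/data flattening used above can be illustrated on a toy plugin dict (names and values are made up); both the header and the data iterate the same dict, so the columns stay aligned:

    plugin = 'mem'
    stats = {'total': 8000000000, 'used': 4000000000}

    csv_header = ['timestamp'] + ['{}_{}'.format(plugin, k) for k in stats]
    csv_data = ['2024-01-01 00:00:00'] + list(stats.values())
    # csv_header -> ['timestamp', 'mem_total', 'mem_used']
    # csv_data   -> ['2024-01-01 00:00:00', 8000000000, 4000000000]
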
Example #6
    def update(self, stats):
        """Update stats in the CSV output file."""
        # Get the stats
        all_stats = stats.getAllExports()
        plugins = stats.getAllPlugins()

        # Init data with timestamp (issue#708)
        csv_header = ["timestamp"]
        csv_data = [time.strftime("%Y-%m-%d %H:%M:%S")]

        # Loop over available plugins
        for i, plugin in enumerate(plugins):
            if plugin in self.plugins_to_export():
                if isinstance(all_stats[i], list):
                    for stat in all_stats[i]:
                        # First line: header
                        if self.first_line:
                            csv_header += ("{0}_{1}_{2}".format(plugin, self.get_item_key(stat), item) for item in stat)
                        # Other lines: stats
                        csv_data += itervalues(stat)
                elif isinstance(all_stats[i], dict):
                    # First line: header
                    if self.first_line:
                        fieldnames = iterkeys(all_stats[i])
                        csv_header += ("{0}_{1}".format(plugin, fieldname) for fieldname in fieldnames)
                    # Other lines: stats
                    csv_data += itervalues(all_stats[i])

        # Export to CSV
        if self.first_line:
            self.writer.writerow(csv_header)
            self.first_line = False
        self.writer.writerow(csv_data)
        self.csv_file.flush()
Example #7
    def update_views(self):
        """Update stats views."""
        # Call the parent class method
        super(Plugin, self).update_views()

        # Add specific information
        # Optional
        for key in iterkeys(self.stats):
            self.views[key]['optional'] = True
Example #9
File: glances_csv.py, Project: zysr/glances
    def update(self, stats):
        """Update stats in the CSV output file."""
        # Get the stats
        all_stats = stats.getAllExportsAsDict(
            plugin_list=self.plugins_to_export())

        # Init data with timestamp (issue#708)
        if self.first_line:
            csv_header = ['timestamp']
        csv_data = [time.strftime('%Y-%m-%d %H:%M:%S')]

        # Loop over plugins to export
        for plugin in self.plugins_to_export():
            if isinstance(all_stats[plugin], list):
                for stat in all_stats[plugin]:
                    # First line: header
                    if self.first_line:
                        csv_header += ('{}_{}_{}'.format(
                            plugin, self.get_item_key(stat), item)
                                       for item in stat)
                    # Other lines: stats
                    csv_data += itervalues(stat)
            elif isinstance(all_stats[plugin], dict):
                # First line: header
                if self.first_line:
                    fieldnames = iterkeys(all_stats[plugin])
                    csv_header += ('{}_{}'.format(plugin, fieldname)
                                   for fieldname in fieldnames)
                # Other lines: stats
                csv_data += itervalues(all_stats[plugin])

        # Export to CSV
        # Manage header
        if self.first_line:
            if self.old_header is None:
                # New file, write the header at the top of the CSV file
                self.writer.writerow(csv_header)
            # File already exists, check if the headers are compatible
            if self.old_header != csv_header:
                # Headers are different, log an error and do not write data
                logger.error(
                    "Cannot append data to existing CSV file. Headers are different."
                )
                logger.debug("Old header: {}".format(self.old_header))
                logger.debug("New header: {}".format(csv_header))
            else:
                # Headers are equal, ready to write data
                self.old_header = None
            # Only do this once
            self.first_line = False
        # Manage data
        if self.old_header is None:
            self.writer.writerow(csv_data)
            self.csv_file.flush()
Example #10
 def update_processcount(self, plist):
     """Update the global process count from the current processes list"""
     # Update the maximum process ID (pid) number
     self.processcount['pid_max'] = self.pid_max
     # For each key in the processcount dict
     # count the number of processes with the same status
     for k in iterkeys(self.processcount):
         self.processcount[k] = len(list(filter(lambda v: v['status'] is k,
                                                plist)))
     # Compute thread
     self.processcount['thread'] = sum(i['num_threads'] for i in plist
                                       if i['num_threads'] is not None)
     # Compute total
     self.processcount['total'] = len(plist)
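
For comparison, the same per-status counting can be sketched with collections.Counter. This is an alternative to the filter/lambda loop above, not the plugin's actual code, and it assumes plist is a list of dicts with 'status' and 'num_threads' fields:

    from collections import Counter

    def count_by_status(plist):
        """Count processes per status value (e.g. 'running', 'sleeping')."""
        counts = dict(Counter(p['status'] for p in plist))
        counts['total'] = len(plist)
        counts['thread'] = sum(p['num_threads'] for p in plist
                               if p.get('num_threads') is not None)
        return counts
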
Example #12
    def update_snmp(self):
        """Update CPU stats using SNMP."""

        # Init new stats
        stats = self.get_init_value()

        # Update stats using SNMP
        if self.short_system_name in ('windows', 'esxi'):
            # Windows or VMWare ESXi
            # You can find the CPU utilization of a Windows system by querying the OID
            # It also gives the number of cores (number of elements in the table)
            try:
                cpu_stats = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                bulk=True)
            except KeyError:
                self.reset()

            # Iterate through the CPUs and compute the idle CPU stats
            stats['nb_log_core'] = 0
            stats['idle'] = 0
            for c in cpu_stats:
                if c.startswith('percent'):
                    stats['idle'] += float(cpu_stats['percent.3'])
                    stats['nb_log_core'] += 1
            if stats['nb_log_core'] > 0:
                stats['idle'] = stats['idle'] / stats['nb_log_core']
            stats['idle'] = 100 - stats['idle']
            stats['total'] = 100 - stats['idle']

        else:
            # Default behavior
            try:
                stats = self.get_stats_snmp(
                    snmp_oid=snmp_oid[self.short_system_name])
            except KeyError:
                stats = self.get_stats_snmp(
                    snmp_oid=snmp_oid['default'])

            if stats['idle'] == '':
                self.reset()
                return self.stats

            # Convert SNMP stats to float
            for key in iterkeys(stats):
                stats[key] = float(stats[key])
            stats['total'] = 100 - stats['idle']

        return stats
Example #13
File: glances_cpu.py, Project: zysr/glances
    def update_snmp(self):
        """Update CPU stats using SNMP."""

        # Init new stats
        stats = self.get_init_value()

        # Update stats using SNMP
        if self.short_system_name in ('windows', 'esxi'):
            # Windows or VMWare ESXi
            # You can find the CPU utilization of a Windows system by querying the OID
            # It also gives the number of cores (number of elements in the table)
            try:
                cpu_stats = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                bulk=True)
            except KeyError:
                self.reset()

            # Iterate through the CPUs and compute the idle CPU stats
            stats['nb_log_core'] = 0
            stats['idle'] = 0
            for c in cpu_stats:
                if c.startswith('percent'):
                    stats['idle'] += float(cpu_stats['percent.3'])
                    stats['nb_log_core'] += 1
            if stats['nb_log_core'] > 0:
                stats['idle'] = stats['idle'] / stats['nb_log_core']
            stats['idle'] = 100 - stats['idle']
            stats['total'] = 100 - stats['idle']

        else:
            # Default behavior
            try:
                stats = self.get_stats_snmp(
                    snmp_oid=snmp_oid[self.short_system_name])
            except KeyError:
                stats = self.get_stats_snmp(
                    snmp_oid=snmp_oid['default'])

            if stats['idle'] == '':
                self.reset()
                return self.stats

            # Convert SNMP stats to float
            for key in iterkeys(stats):
                stats[key] = float(stats[key])
            stats['total'] = 100 - stats['idle']

        return stats
Example #14
    def update(self):
        """Update RAM memory stats using the input method."""
        # Reset stats
        self.reset()

        if self.input_method == "local":
            # Update stats using the standard system lib
            # Grab MEM using the PSUtil virtual_memory method
            vm_stats = psutil.virtual_memory()

            # Get all the memory stats (copy/paste of the PsUtil documentation)
            # total: total physical memory available.
            # available: the actual amount of available memory that can be given instantly to processes that request more memory in bytes; this is calculated by summing different memory values depending on the platform (e.g. free + buffers + cached on Linux) and it is supposed to be used to monitor actual memory usage in a cross platform fashion.
            # percent: the percentage usage calculated as (total - available) / total * 100.
            # used: memory used, calculated differently depending on the platform and designed for informational purposes only.
            # free: memory not being used at all (zeroed) that is readily available; note that this doesn’t reflect the actual memory available (use ‘available’ instead).
            # Platform-specific fields:
            # active: (UNIX): memory currently in use or very recently used, and so it is in RAM.
            # inactive: (UNIX): memory that is marked as not used.
            # buffers: (Linux, BSD): cache for things like file system metadata.
            # cached: (Linux, BSD): cache for various things.
            # wired: (BSD, OSX): memory that is marked to always stay in RAM. It is never moved to disk.
            # shared: (BSD): memory that may be simultaneously accessed by multiple processes.
            self.reset()
            for mem in [
                "total",
                "available",
                "percent",
                "used",
                "free",
                "active",
                "inactive",
                "buffers",
                "cached",
                "wired",
                "shared",
            ]:
                if hasattr(vm_stats, mem):
                    self.stats[mem] = getattr(vm_stats, mem)

            # Use the 'free'/htop calculation
            # free=available+buffer+cached
            self.stats["free"] = self.stats["available"]
            if hasattr(self.stats, "buffers"):
                self.stats["free"] += self.stats["buffers"]
            if hasattr(self.stats, "cached"):
                self.stats["free"] += self.stats["cached"]
            # used=total-free
            self.stats["used"] = self.stats["total"] - self.stats["free"]
        elif self.input_method == "snmp":
            # Update stats using SNMP
            if self.short_system_name in ("windows", "esxi"):
                # Mem stats for Windows|Vmware Esxi are stored in the FS table
                try:
                    fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name], bulk=True)
                except KeyError:
                    self.reset()
                else:
                    for fs in fs_stat:
                        # The Physical Memory (Windows) or Real Memory (VMware)
                        # gives statistics on RAM usage and availability.
                        if fs in ("Physical Memory", "Real Memory"):
                            self.stats["total"] = int(fs_stat[fs]["size"]) * int(fs_stat[fs]["alloc_unit"])
                            self.stats["used"] = int(fs_stat[fs]["used"]) * int(fs_stat[fs]["alloc_unit"])
                            self.stats["percent"] = float(self.stats["used"] * 100 / self.stats["total"])
                            self.stats["free"] = self.stats["total"] - self.stats["used"]
                            break
            else:
                # Default behavior for other OSes
                self.stats = self.get_stats_snmp(snmp_oid=snmp_oid["default"])

                if self.stats["total"] == "":
                    self.reset()
                    return self.stats

                for key in iterkeys(self.stats):
                    if self.stats[key] != "":
                        self.stats[key] = float(self.stats[key]) * 1024

                # Use the 'free'/htop calculation
                self.stats["free"] = (
                    self.stats["free"] - self.stats["total"] + (self.stats["buffers"] + self.stats["cached"])
                )

                # used=total-free
                self.stats["used"] = self.stats["total"] - self.stats["free"]

                # percent: the percentage usage calculated as (total - available) / total * 100.
                self.stats["percent"] = float((self.stats["total"] - self.stats["free"]) / self.stats["total"] * 100)

        # Update the history list
        self.update_stats_history()

        # Update the view
        self.update_views()

        return self.stats
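
A small worked example of the 'free'/htop calculation described in the comments of the local branch (all numbers are made up, in bytes):

    # Hypothetical psutil.virtual_memory() values.
    total = 16000000000
    available = 9000000000
    buffers = 500000000
    cached = 2500000000

    free = available + buffers + cached           # 12000000000
    used = total - free                           #  4000000000
    percent = (total - available) / total * 100   # 43.75
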
Example #15
    def msg_curse(self, args=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist and the display plugin is enabled...
        if not self.stats or args.disable_raid:
            return ret

        # Build the string message
        # Header
        msg = "{0:11}".format("RAID disks")
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = "{0:>6}".format("Used")
        ret.append(self.curse_add_line(msg))
        msg = "{0:>6}".format("Avail")
        ret.append(self.curse_add_line(msg))
        # Data
        arrays = sorted(iterkeys(self.stats))
        for array in arrays:
            # New line
            ret.append(self.curse_new_line())
            # Display the current status
            status = self.raid_alert(
                self.stats[array]["status"], self.stats[array]["used"], self.stats[array]["available"]
            )
            # Data: RAID type name | disk used | disk available
            array_type = self.stats[array]["type"].upper() if self.stats[array]["type"] is not None else "UNKNOWN"
            msg = "{0:<5}{1:>6}".format(array_type, array)
            ret.append(self.curse_add_line(msg))
            if self.stats[array]["status"] == "active":
                msg = "{0:>6}".format(self.stats[array]["used"])
                ret.append(self.curse_add_line(msg, status))
                msg = "{0:>6}".format(self.stats[array]["available"])
                ret.append(self.curse_add_line(msg, status))
            elif self.stats[array]["status"] == "inactive":
                ret.append(self.curse_new_line())
                msg = "└─ Status {0}".format(self.stats[array]["status"])
                ret.append(self.curse_add_line(msg, status))
                components = sorted(iterkeys(self.stats[array]["components"]))
                for i, component in enumerate(components):
                    if i == len(components) - 1:
                        tree_char = "└─"
                    else:
                        tree_char = "├─"
                    ret.append(self.curse_new_line())
                    msg = "   {0} disk {1}: ".format(tree_char, self.stats[array]["components"][component])
                    ret.append(self.curse_add_line(msg))
                    msg = "{0}".format(component)
                    ret.append(self.curse_add_line(msg))
            if self.stats[array]["used"] < self.stats[array]["available"]:
                # Display current array configuration
                ret.append(self.curse_new_line())
                msg = "└─ Degraded mode"
                ret.append(self.curse_add_line(msg, status))
                if len(self.stats[array]["config"]) < 17:
                    ret.append(self.curse_new_line())
                    msg = "   └─ {0}".format(self.stats[array]["config"].replace("_", "A"))
                    ret.append(self.curse_add_line(msg))

        return ret
Example #16
    def update(self):
        """Update swap memory stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        if self.input_method == 'local':
            # Update stats using the standard system lib
            # Grab SWAP using the psutil swap_memory method
            sm_stats = psutil.swap_memory()

            # Get all the swap stats (copy/paste of the psutil documentation)
            # total: total swap memory in bytes
            # used: used swap memory in bytes
            # free: free swap memory in bytes
            # percent: the percentage usage
            # sin: the number of bytes the system has swapped in from disk (cumulative)
            # sout: the number of bytes the system has swapped out from disk
            # (cumulative)
            for swap in ['total', 'used', 'free', 'percent',
                         'sin', 'sout']:
                if hasattr(sm_stats, swap):
                    stats[swap] = getattr(sm_stats, swap)
        elif self.input_method == 'snmp':
            # Update stats using SNMP
            if self.short_system_name == 'windows':
                # Mem stats for Windows OS are stored in the FS table
                try:
                    fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                  bulk=True)
                except KeyError:
                    self.reset()
                else:
                    for fs in fs_stat:
                        # The virtual memory concept is used by the operating
                        # system to extend (virtually) the physical memory and
                        # thus to run more programs by swapping unused memory
                        # zone (page) to a disk file.
                        if fs == 'Virtual Memory':
                            stats['total'] = int(
                                fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
                            stats['used'] = int(
                                fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
                            stats['percent'] = float(
                                stats['used'] * 100 / stats['total'])
                            stats['free'] = stats['total'] - stats['used']
                            break
            else:
                stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])

                if stats['total'] == '':
                    self.reset()
                    return stats

                for key in iterkeys(stats):
                    if stats[key] != '':
                        stats[key] = float(stats[key]) * 1024

                # used=total-free
                stats['used'] = stats['total'] - stats['free']

                # percent: the percentage usage calculated as (total -
                # available) / total * 100.
                stats['percent'] = float(
                    (stats['total'] - stats['free']) / stats['total'] * 100)

        # Update the stats
        self.stats = stats

        return self.stats
Example #17
    def update(self):
        """Update Docker stats using the input method."""
        # Reset stats
        self.reset()

        # Get the current Docker API client
        if not self.docker_client:
            # First time, try to connect to the server
            self.docker_client = self.connect()
            if self.docker_client is None:
                global docker_tag
                docker_tag = False

        # The Docker-py lib is mandatory
        if not docker_tag or (self.args is not None and self.args.disable_docker):
            return self.stats

        if self.input_method == 'local':
            # Update stats

            # Docker version
            # Exemple: {
            #     "KernelVersion": "3.16.4-tinycore64",
            #     "Arch": "amd64",
            #     "ApiVersion": "1.15",
            #     "Version": "1.3.0",
            #     "GitCommit": "c78088f",
            #     "Os": "linux",
            #     "GoVersion": "go1.3.3"
            # }
            try:
                self.stats['version'] = self.docker_client.version()
            except Exception as e:
                # Correct issue#649
                logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e))
                return self.stats

            # Container globals information
            # Example: [{u'Status': u'Up 36 seconds',
            #            u'Created': 1420378904,
            #            u'Image': u'nginx:1',
            #            u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443},
            #                       {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}],
            #            u'Command': u"nginx -g 'daemon off;'",
            #            u'Names': [u'/webstack_nginx_1'],
            #            u'Id': u'b0da859e84eb4019cf1d965b15e9323006e510352c402d2f442ea632d61faaa5'}]

            # Update current containers list
            try:
                self.stats['containers'] = self.docker_client.containers() or []
            except Exception as e:
                logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e))
                return self.stats

            # Start new thread for new container
            for container in self.stats['containers']:
                if container['Id'] not in self.thread_list:
                    # Thread did not exist in the internal dict
                    # Create it and add it to the internal dict
                    logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container['Id'][:12]))
                    t = ThreadDockerGrabber(self.docker_client, container['Id'])
                    self.thread_list[container['Id']] = t
                    t.start()

            # Stop threads for non-existing containers
            nonexisting_containers = set(iterkeys(self.thread_list)) - set([c['Id'] for c in self.stats['containers']])
            for container_id in nonexisting_containers:
                # Stop the thread
                logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
                self.thread_list[container_id].stop()
                # Delete the item from the dict
                del self.thread_list[container_id]

            # Get stats for all containers
            for container in self.stats['containers']:
                # The key is the container name and not the Id
                container['key'] = self.get_key()

                # Export name (first name in the list, without the /)
                container['name'] = container['Names'][0][1:]

                container['cpu'] = self.get_docker_cpu(container['Id'], self.thread_list[container['Id']].stats)
                container['memory'] = self.get_docker_memory(container['Id'], self.thread_list[container['Id']].stats)
                container['network'] = self.get_docker_network(container['Id'], self.thread_list[container['Id']].stats)
                container['io'] = self.get_docker_io(container['Id'], self.thread_list[container['Id']].stats)

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass

        return self.stats
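
The thread bookkeeping above follows a simple reconcile pattern: start one grabber thread per new container id and stop the threads whose container disappeared. A generic sketch of that pattern (the grabber interface, a constructor plus start() and stop(), is assumed from the calls above):

    def reconcile_threads(thread_list, current_ids, make_thread):
        """Keep exactly one worker thread per id in current_ids.

        thread_list: dict mapping id -> thread (mutated in place)
        make_thread: factory returning a thread object for a given id
        """
        # Start a thread for every id that has no worker yet.
        for new_id in current_ids - set(thread_list):
            t = make_thread(new_id)
            thread_list[new_id] = t
            t.start()
        # Stop and drop workers whose id is no longer present.
        for old_id in set(thread_list) - current_ids:
            thread_list[old_id].stop()
            del thread_list[old_id]
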
Example #18
    def update(self):
        """Update RAM memory stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        if self.input_method == 'local':
            # Update stats using the standard system lib
            # Grab MEM using the psutil virtual_memory method
            vm_stats = psutil.virtual_memory()

            # Get all the memory stats (copy/paste of the psutil documentation)
            # total: total physical memory available.
            # available: the actual amount of available memory that can be given instantly to processes that request more memory in bytes; this is calculated by summing different memory values depending on the platform (e.g. free + buffers + cached on Linux) and it is supposed to be used to monitor actual memory usage in a cross platform fashion.
            # percent: the percentage usage calculated as (total - available) / total * 100.
            # used: memory used, calculated differently depending on the platform and designed for informational purposes only.
            # free: memory not being used at all (zeroed) that is readily available; note that this doesn’t reflect the actual memory available (use ‘available’ instead).
            # Platform-specific fields:
            # active: (UNIX): memory currently in use or very recently used, and so it is in RAM.
            # inactive: (UNIX): memory that is marked as not used.
            # buffers: (Linux, BSD): cache for things like file system metadata.
            # cached: (Linux, BSD): cache for various things.
            # wired: (BSD, macOS): memory that is marked to always stay in RAM. It is never moved to disk.
            # shared: (BSD): memory that may be simultaneously accessed by multiple processes.
            self.reset()
            for mem in ['total', 'available', 'percent', 'used', 'free',
                        'active', 'inactive', 'buffers', 'cached',
                        'wired', 'shared']:
                if hasattr(vm_stats, mem):
                    stats[mem] = getattr(vm_stats, mem)

            # Use the 'free'/htop calculation
            # free=available+buffer+cached
            stats['free'] = stats['available']
            if hasattr(stats, 'buffers'):
                stats['free'] += stats['buffers']
            if hasattr(stats, 'cached'):
                stats['free'] += stats['cached']
            # used=total-free
            stats['used'] = stats['total'] - stats['free']
        elif self.input_method == 'snmp':
            # Update stats using SNMP
            if self.short_system_name in ('windows', 'esxi'):
                # Mem stats for Windows|Vmware Esxi are stored in the FS table
                try:
                    fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                  bulk=True)
                except KeyError:
                    self.reset()
                else:
                    for fs in fs_stat:
                        # The Physical Memory (Windows) or Real Memory (VMware)
                        # gives statistics on RAM usage and availability.
                        if fs in ('Physical Memory', 'Real Memory'):
                            stats['total'] = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
                            stats['used'] = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
                            stats['percent'] = float(stats['used'] * 100 / stats['total'])
                            stats['free'] = stats['total'] - stats['used']
                            break
            else:
                # Default behavior for other OSes
                stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])

                if stats['total'] == '':
                    self.reset()
                    return self.stats

                for key in iterkeys(stats):
                    if stats[key] != '':
                        stats[key] = float(stats[key]) * 1024

                # Use the 'free'/htop calculation
                stats['free'] = stats['free'] - stats['total'] + (stats['buffers'] + stats['cached'])

                # used=total-free
                stats['used'] = stats['total'] - stats['free']

                # percent: the percentage usage calculated as (total - available) / total * 100.
                stats['percent'] = float((stats['total'] - stats['free']) / stats['total'] * 100)

        # Update the stats
        self.stats = stats

        return self.stats
Example #19
    def msg_curse(self, args=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist and the display plugin is enabled...
        if not self.stats or args.disable_raid:
            return ret

        # Build the string message
        # Header
        msg = '{0:11}'.format('RAID disks')
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = '{0:>6}'.format('Used')
        ret.append(self.curse_add_line(msg))
        msg = '{0:>6}'.format('Avail')
        ret.append(self.curse_add_line(msg))
        # Data
        arrays = sorted(iterkeys(self.stats))
        for array in arrays:
            # New line
            ret.append(self.curse_new_line())
            # Display the current status
            status = self.raid_alert(self.stats[array]['status'],
                                     self.stats[array]['used'],
                                     self.stats[array]['available'])
            # Data: RAID type name | disk used | disk available
            array_type = (self.stats[array]['type'].upper()
                          if self.stats[array]['type'] is not None else 'UNKNOWN')
            msg = '{0:<5}{1:>6}'.format(array_type, array)
            ret.append(self.curse_add_line(msg))
            if self.stats[array]['status'] == 'active':
                msg = '{0:>6}'.format(self.stats[array]['used'])
                ret.append(self.curse_add_line(msg, status))
                msg = '{0:>6}'.format(self.stats[array]['available'])
                ret.append(self.curse_add_line(msg, status))
            elif self.stats[array]['status'] == 'inactive':
                ret.append(self.curse_new_line())
                msg = '└─ Status {0}'.format(self.stats[array]['status'])
                ret.append(self.curse_add_line(msg, status))
                components = sorted(iterkeys(self.stats[array]['components']))
                for i, component in enumerate(components):
                    if i == len(components) - 1:
                        tree_char = '└─'
                    else:
                        tree_char = '├─'
                    ret.append(self.curse_new_line())
                    msg = '   {0} disk {1}: '.format(
                        tree_char, self.stats[array]['components'][component])
                    ret.append(self.curse_add_line(msg))
                    msg = '{0}'.format(component)
                    ret.append(self.curse_add_line(msg))
            if self.stats[array]['used'] < self.stats[array]['available']:
                # Display current array configuration
                ret.append(self.curse_new_line())
                msg = '└─ Degraded mode'
                ret.append(self.curse_add_line(msg, status))
                if len(self.stats[array]['config']) < 17:
                    ret.append(self.curse_new_line())
                    msg = '   └─ {0}'.format(
                        self.stats[array]['config'].replace('_', 'A'))
                    ret.append(self.curse_add_line(msg))

        return ret
Example #20
    def msg_curse(self, args=None, max_width=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist...
        if not self.stats:
            return ret

        # Max size for the interface name
        name_max_width = max_width - 12

        # Header
        msg = '{:{width}}'.format('RAID disks',
                                  width=name_max_width)
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = '{:>7}'.format('Used')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Avail')
        ret.append(self.curse_add_line(msg))
        # Data
        arrays = sorted(iterkeys(self.stats))
        for array in arrays:
            # New line
            ret.append(self.curse_new_line())
            # Display the current status
            status = self.raid_alert(self.stats[array]['status'],
                                     self.stats[array]['used'],
                                     self.stats[array]['available'],
                                     self.stats[array]['type'])
            # Data: RAID type name | disk used | disk available
            array_type = self.stats[array]['type'].upper() if self.stats[array]['type'] is not None else 'UNKNOWN'
            # Build the full name = array type + array name
            full_name = '{} {}'.format(array_type, array)
            msg = '{:{width}}'.format(full_name,
                                      width=name_max_width)
            ret.append(self.curse_add_line(msg))
            if self.stats[array]['type'] == 'raid0' and self.stats[array]['status'] == 'active':
                msg = '{:>7}'.format(len(self.stats[array]['components']))
                ret.append(self.curse_add_line(msg, status))
                msg = '{:>7}'.format('-')
                ret.append(self.curse_add_line(msg, status))
            elif self.stats[array]['status'] == 'active':
                msg = '{:>7}'.format(self.stats[array]['used'])
                ret.append(self.curse_add_line(msg, status))
                msg = '{:>7}'.format(self.stats[array]['available'])
                ret.append(self.curse_add_line(msg, status))
            elif self.stats[array]['status'] == 'inactive':
                ret.append(self.curse_new_line())
                msg = '└─ Status {}'.format(self.stats[array]['status'])
                ret.append(self.curse_add_line(msg, status))
                components = sorted(iterkeys(self.stats[array]['components']))
                for i, component in enumerate(components):
                    if i == len(components) - 1:
                        tree_char = '└─'
                    else:
                        tree_char = '├─'
                    ret.append(self.curse_new_line())
                    msg = '   {} disk {}: '.format(tree_char, self.stats[array]['components'][component])
                    ret.append(self.curse_add_line(msg))
                    msg = '{}'.format(component)
                    ret.append(self.curse_add_line(msg))
            if self.stats[array]['type'] != 'raid0' and (self.stats[array]['used'] < self.stats[array]['available']):
                # Display current array configuration
                ret.append(self.curse_new_line())
                msg = '└─ Degraded mode'
                ret.append(self.curse_add_line(msg, status))
                if len(self.stats[array]['config']) < 17:
                    ret.append(self.curse_new_line())
                    msg = '   └─ {}'.format(self.stats[array]['config'].replace('_', 'A'))
                    ret.append(self.curse_add_line(msg))

        return ret
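
In the degraded-mode branch, config holds an mdstat-style slot string ('U' for an active member, '_' for a missing one); the replace() call only changes how the missing slot is rendered. A tiny illustration with a made-up value:

    config = 'U_'                        # two-disk array, second member missing
    rendered = config.replace('_', 'A')  # displayed as 'UA'
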
Example #21
    def update(self):
        """Update Docker stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        # The Docker-py lib is mandatory
        if import_error_tag:
            return self.stats

        if self.input_method == 'local':
            # Update stats

            # Docker version
            # Example: {
            #     "KernelVersion": "3.16.4-tinycore64",
            #     "Arch": "amd64",
            #     "ApiVersion": "1.15",
            #     "Version": "1.3.0",
            #     "GitCommit": "c78088f",
            #     "Os": "linux",
            #     "GoVersion": "go1.3.3"
            # }
            try:
                stats['version'] = self.docker_client.version()
            except Exception as e:
                # Correct issue#649
                logger.error(
                    "{} plugin - Cannot get Docker version ({})".format(
                        self.plugin_name, e))
                # We may have lost the connection; remove version info
                if 'version' in self.stats:
                    del self.stats['version']
                self.stats['containers'] = []
                return self.stats

            # Update current containers list
            try:
                # Issue #1152: Docker module doesn't export details about stopped containers
                # The Docker/all key of the configuration file should be set to True
                containers = self.docker_client.containers.list(
                    all=self._all_tag()) or []
            except Exception as e:
                logger.error(
                    "{} plugin - Cannot get containers list ({})".format(
                        self.plugin_name, e))
                # We may have lost the connection; empty the containers list.
                self.stats['containers'] = []
                return self.stats

            # Start new thread for new container
            for container in containers:
                if container.id not in self.thread_list:
                    # Thread did not exist in the internal dict
                    # Create it and add it to the internal dict
                    logger.debug(
                        "{} plugin - Create thread for container {}".format(
                            self.plugin_name, container.id[:12]))
                    t = ThreadDockerGrabber(container)
                    self.thread_list[container.id] = t
                    t.start()

            # Stop threads for non-existing containers
            nonexisting_containers = set(iterkeys(self.thread_list)) - set(
                [c.id for c in containers])
            for container_id in nonexisting_containers:
                # Stop the thread
                logger.debug(
                    "{} plugin - Stop thread for old container {}".format(
                        self.plugin_name, container_id[:12]))
                self.thread_list[container_id].stop()
                # Delete the item from the dict
                del self.thread_list[container_id]

            # Get stats for all containers
            stats['containers'] = []
            for container in containers:
                # Init the stats for the current container
                container_stats = {}
                # The key is the container name and not the Id
                container_stats['key'] = self.get_key()
                # Export name (first name in the Names list, without the /)
                container_stats['name'] = nativestr(container.name)
                # Export global Names (used by the WebUI)
                container_stats['Names'] = [nativestr(container.name)]
                # Container Id
                container_stats['Id'] = container.id
                # Container Image
                container_stats['Image'] = container.image.tags
                # Global stats (from attrs)
                container_stats['Status'] = container.attrs['State']['Status']
                container_stats['Command'] = container.attrs['Config'][
                    'Entrypoint']
                # Standards stats
                if container_stats['Status'] in ('running', 'paused'):
                    container_stats['cpu'] = self.get_docker_cpu(
                        container.id, self.thread_list[container.id].stats)
                    container_stats['cpu_percent'] = container_stats[
                        'cpu'].get('total', None)
                    container_stats['memory'] = self.get_docker_memory(
                        container.id, self.thread_list[container.id].stats)
                    container_stats['memory_usage'] = container_stats[
                        'memory'].get('usage', None)
                    container_stats['io'] = self.get_docker_io(
                        container.id, self.thread_list[container.id].stats)
                    container_stats['io_r'] = container_stats['io'].get(
                        'ior', None)
                    container_stats['io_w'] = container_stats['io'].get(
                        'iow', None)
                    container_stats['network'] = self.get_docker_network(
                        container.id, self.thread_list[container.id].stats)
                    container_stats['network_rx'] = container_stats[
                        'network'].get('rx', None)
                    container_stats['network_tx'] = container_stats[
                        'network'].get('tx', None)
                else:
                    container_stats['cpu'] = {}
                    container_stats['cpu_percent'] = None
                    container_stats['memory'] = {}
                    container_stats['memory_percent'] = None
                    container_stats['io'] = {}
                    container_stats['io_r'] = None
                    container_stats['io_w'] = None
                    container_stats['network'] = {}
                    container_stats['network_rx'] = None
                    container_stats['network_tx'] = None
                # Add current container stats to the stats list
                stats['containers'].append(container_stats)

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass

        # Sort and update the stats
        self.stats = sort_stats(stats)

        return self.stats
Example #22
    def update(self):
        """Update swap memory stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        if self.input_method == 'local':
            # Update stats using the standard system lib
            # Grab SWAP using the psutil swap_memory method
            try:
                sm_stats = psutil.swap_memory()
            except RuntimeError:
                # Crash on startup on Illumos when no swap is configured #1767
                pass
            else:
                # Get all the swap stats (copy/paste of the psutil documentation)
                # total: total swap memory in bytes
                # used: used swap memory in bytes
                # free: free swap memory in bytes
                # percent: the percentage usage
                # sin: the number of bytes the system has swapped in from disk (cumulative)
                # sout: the number of bytes the system has swapped out from disk
                # (cumulative)
                for swap in [
                        'total', 'used', 'free', 'percent', 'sin', 'sout'
                ]:
                    if hasattr(sm_stats, swap):
                        stats[swap] = getattr(sm_stats, swap)
        elif self.input_method == 'snmp':
            # Update stats using SNMP
            if self.short_system_name == 'windows':
                # Mem stats for Windows OS are stored in the FS table
                try:
                    fs_stat = self.get_stats_snmp(
                        snmp_oid=snmp_oid[self.short_system_name], bulk=True)
                except KeyError:
                    self.reset()
                else:
                    for fs in fs_stat:
                        # The virtual memory concept is used by the operating
                        # system to extend (virtually) the physical memory and
                        # thus to run more programs by swapping unused memory
                        # zone (page) to a disk file.
                        if fs == 'Virtual Memory':
                            stats['total'] = int(fs_stat[fs]['size']) * int(
                                fs_stat[fs]['alloc_unit'])
                            stats['used'] = int(fs_stat[fs]['used']) * int(
                                fs_stat[fs]['alloc_unit'])
                            stats['percent'] = float(stats['used'] * 100 /
                                                     stats['total'])
                            stats['free'] = stats['total'] - stats['used']
                            break
            else:
                stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])

                if stats['total'] == '':
                    self.reset()
                    return stats

                for key in iterkeys(stats):
                    if stats[key] != '':
                        stats[key] = float(stats[key]) * 1024

                # used=total-free
                stats['used'] = stats['total'] - stats['free']

                # percent: the percentage usage calculated as (total -
                # available) / total * 100.
                stats['percent'] = float(
                    (stats['total'] - stats['free']) / stats['total'] * 100)

        # Update the stats
        self.stats = stats

        return self.stats
Example #23
    def update(self):
        """Update CPU stats using the input method."""
        # Reset stats
        self.reset()

        # Grab CPU stats using psutil's cpu_percent and cpu_times_percent
        # methods
        if self.input_method == 'local':
            # Get all possible values for CPU stats: user, system, idle,
            # nice (UNIX), iowait (Linux), irq (Linux, FreeBSD), steal (Linux 2.6.11+)
            # The following stats are returned by the API but not displayed in the UI:
            # softirq (Linux), guest (Linux 2.6.24+), guest_nice (Linux 3.2.0+)
            self.stats['total'] = cpu_percent.get()
            cpu_times_percent = psutil.cpu_times_percent(interval=0.0)
            for stat in ['user', 'system', 'idle', 'nice', 'iowait',
                         'irq', 'softirq', 'steal', 'guest', 'guest_nice']:
                if hasattr(cpu_times_percent, stat):
                    self.stats[stat] = getattr(cpu_times_percent, stat)
        elif self.input_method == 'snmp':
            # Update stats using SNMP
            if self.short_system_name in ('windows', 'esxi'):
                # Windows or VMWare ESXi
                # The CPU utilization of a Windows system can be read by querying the OID,
                # which also gives the number of cores (number of elements in the table)
                try:
                    cpu_stats = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                    bulk=True)
                except KeyError:
                    # Without the SNMP result there is nothing to compute below
                    self.reset()
                    return self.stats

                # Iterate through the CPUs and compute the average idle stat
                self.stats['nb_log_core'] = 0
                self.stats['idle'] = 0
                for c in cpu_stats:
                    if c.startswith('percent'):
                        # Sum the per-core load values before averaging them
                        self.stats['idle'] += float(cpu_stats[c])
                        self.stats['nb_log_core'] += 1
                if self.stats['nb_log_core'] > 0:
                    self.stats['idle'] = self.stats[
                        'idle'] / self.stats['nb_log_core']
                self.stats['idle'] = 100 - self.stats['idle']
                self.stats['total'] = 100 - self.stats['idle']

            else:
                # Default behavior
                try:
                    self.stats = self.get_stats_snmp(
                        snmp_oid=snmp_oid[self.short_system_name])
                except KeyError:
                    self.stats = self.get_stats_snmp(
                        snmp_oid=snmp_oid['default'])

                if self.stats['idle'] == '':
                    self.reset()
                    return self.stats

                # Convert SNMP stats to float
                for key in iterkeys(self.stats):
                    self.stats[key] = float(self.stats[key])
                self.stats['total'] = 100 - self.stats['idle']

        # Update the history list
        self.update_stats_history()

        # Update the view
        self.update_views()

        return self.stats
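Stripped of the SNMP branch, the local update amounts to the sketch below. psutil.cpu_percent() is used here in place of Glances' cpu_percent.get() helper, which is an assumption about what that helper wraps; cpu_summary is an illustrative name.

import psutil

def cpu_summary():
    """Sketch: overall CPU load plus the per-state percentages."""
    stats = {'total': psutil.cpu_percent(interval=0.0)}
    times = psutil.cpu_times_percent(interval=0.0)
    for field in ('user', 'system', 'idle', 'nice', 'iowait',
                  'irq', 'softirq', 'steal', 'guest', 'guest_nice'):
        # Not every field exists on every platform
        if hasattr(times, field):
            stats[field] = getattr(times, field)
    return stats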
Example #24
    def update(self):
        """Update Docker stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        # The Docker-py lib is mandatory
        if import_error_tag:
            return self.stats

        if self.input_method == 'local':
            # Update stats

            # Docker version
            # Example: {
            #     "KernelVersion": "3.16.4-tinycore64",
            #     "Arch": "amd64",
            #     "ApiVersion": "1.15",
            #     "Version": "1.3.0",
            #     "GitCommit": "c78088f",
            #     "Os": "linux",
            #     "GoVersion": "go1.3.3"
            # }
            try:
                stats['version'] = self.docker_client.version()
            except Exception as e:
                # Correct issue#649
                logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e))
                return self.stats

            # Update current containers list
            try:
                # Issue #1152: Docker module doesn't export details about stopped containers
                # The Docker/all key of the configuration file should be set to True
                containers = self.docker_client.containers.list(all=self._all_tag()) or []
            except Exception as e:
                logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e))
                return self.stats

            # Start new thread for new container
            for container in containers:
                if container.id not in self.thread_list:
                    # Thread did not exist in the internal dict
                    # Create it and add it to the internal dict
                    logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]))
                    t = ThreadDockerGrabber(container)
                    self.thread_list[container.id] = t
                    t.start()

            # Stop threads for non-existing containers
            nonexisting_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
            for container_id in nonexisting_containers:
                # Stop the thread
                logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
                self.thread_list[container_id].stop()
                # Delete the item from the dict
                del self.thread_list[container_id]

            # Get stats for all containers
            stats['containers'] = []
            for container in containers:
                # Init the stats for the current container
                container_stats = {}
                # The key is the container name and not the Id
                container_stats['key'] = self.get_key()
                # Export name (first name in the Names list, without the /)
                container_stats['name'] = nativestr(container.name)
                # Export global Names (used by the WebUI)
                container_stats['Names'] = [nativestr(container.name)]
                # Container Id
                container_stats['Id'] = container.id
                # Container Image
                container_stats['Image'] = container.image.tags
                # Global stats (from attrs)
                container_stats['Status'] = container.attrs['State']['Status']
                container_stats['Command'] = container.attrs['Config']['Entrypoint']
                # Standard stats
                if container_stats['Status'] in ('running', 'paused'):
                    container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_list[container.id].stats)
                    container_stats['cpu_percent'] = container_stats['cpu'].get('total', None)
                    container_stats['memory'] = self.get_docker_memory(container.id, self.thread_list[container.id].stats)
                    container_stats['memory_usage'] = container_stats['memory'].get('usage', None)
                    container_stats['io'] = self.get_docker_io(container.id, self.thread_list[container.id].stats)
                    container_stats['io_r'] = container_stats['io'].get('ior', None)
                    container_stats['io_w'] = container_stats['io'].get('iow', None)
                    container_stats['network'] = self.get_docker_network(container.id, self.thread_list[container.id].stats)
                    container_stats['network_rx'] = container_stats['network'].get('rx', None)
                    container_stats['network_tx'] = container_stats['network'].get('tx', None)
                else:
                    container_stats['cpu'] = {}
                    container_stats['cpu_percent'] = None
                    container_stats['memory'] = {}
                    container_stats['memory_usage'] = None
                    container_stats['io'] = {}
                    container_stats['io_r'] = None
                    container_stats['io_w'] = None
                    container_stats['network'] = {}
                    container_stats['network_rx'] = None
                    container_stats['network_tx'] = None
                # Add current container stats to the stats list
                stats['containers'].append(container_stats)

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass

        # Update the stats
        self.stats = stats

        return self.stats
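Without the per-container grabber threads, the container enumeration part of the local branch can be sketched with docker-py alone. This assumes the docker package is installed and a Docker daemon is reachable; list_container_states is an illustrative name.

import docker

def list_container_states(all_containers=True):
    """Sketch: enumerate containers and the static attrs used above."""
    client = docker.from_env()
    containers = client.containers.list(all=all_containers) or []
    return [{
        'name': c.name,
        'Id': c.id,
        'Image': c.image.tags,
        'Status': c.attrs['State']['Status'],
        'Command': c.attrs['Config']['Entrypoint'],
    } for c in containers]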
Example #25
File: graph.py Project: nicolargo/glances
    def generate_graph(self, stats):
        """Generate graphs from plugins history.

        Return the number of output files generated by the function.
        """
        if not self.graph_enabled():
            return 0

        index_all = 0
        for p in stats.getAllPlugins():
            # History
            h = stats.get_plugin(p).get_export_history()
            # Current plugin item history list
            ih = stats.get_plugin(p).get_items_history_list()
            # Check if we must process history
            if h is None or ih is None:
                # History (h) not available for plugin (p)
                continue
            # Init graph
            plt.clf()
            index_graph = 0
            handles = []
            labels = []
            for i in ih:
                if i["name"] in iterkeys(h):
                    # The key exists
                    # Add the curves in the current chart
                    logger.debug("Generate graph: %s %s" % (p, i["name"]))
                    index_graph += 1
                    # Labels
                    handles.append(
                        plt.Rectangle((0, 0), 1, 1, fc=self.get_graph_color(i), ec=self.get_graph_color(i), linewidth=2)
                    )
                    labels.append(self.get_graph_legend(i))
                    # Legend
                    plt.ylabel(self.get_graph_yunit(i, pre_label=""))
                    # Curves
                    plt.grid(True)
                    # Points are stored as tuple (date, value)
                    x, y = zip(*h[i["name"]])
                    plt.plot_date(
                        x,
                        y,
                        fmt="",
                        drawstyle="default",
                        linestyle="-",
                        color=self.get_graph_color(i),
                        xdate=True,
                        ydate=False,
                    )
                    if index_graph == 1:
                        # Title only on top of the first graph
                        plt.title(p.capitalize())
                else:
                    # The key does not exist
                    # Find if another key ends with the key
                    # Ex: key='tx' => 'ethernet_tx'
                    # Add one curve per chart
                    stats_history_filtered = sorted([key for key in iterkeys(h) if key.endswith("_" + i["name"])])
                    logger.debug("Generate graphs: %s %s" % (p, stats_history_filtered))
                    if len(stats_history_filtered) > 0:
                        # Create 'n' graphs
                        # Each graph iterates through the stats
                        plt.clf()
                        index_item = 0
                        for k in stats_history_filtered:
                            index_item += 1
                            plt.subplot(len(stats_history_filtered), 1, index_item)
                            # Legend
                            plt.ylabel(self.get_graph_yunit(i, pre_label=k))
                            # Curves
                            plt.grid(True)
                            # Points are stored as tuple (date, value)
                            x, y = zip(*h[k])
                            plt.plot_date(
                                x,
                                y,
                                fmt="",
                                drawstyle="default",
                                linestyle="-",
                                color=self.get_graph_color(i),
                                xdate=True,
                                ydate=False,
                            )
                            if index_item == 1:
                                # Title only on top of the first graph
                                plt.title(p.capitalize() + " " + i["name"])
                        # Save the graph to output file
                        fig = plt.gcf()
                        fig.set_size_inches(20, 5 * index_item)
                        plt.xlabel("Date")
                        plt.savefig(os.path.join(self.output_folder, "glances_%s_%s.png" % (p, i["name"])), dpi=72)
                        index_all += 1

            if index_graph > 0:
                # Save the graph to output file
                fig = plt.gcf()
                fig.set_size_inches(20, 10)
                plt.legend(handles, labels, loc=1, prop={"size": 9})
                plt.xlabel("Date")
                plt.savefig(os.path.join(self.output_folder, "glances_%s.png" % p), dpi=72)
                index_all += 1

            plt.close()

        return index_all
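For a single item whose history is stored as (date, value) tuples, the plotting core of the function above reduces to the sketch below. It assumes a matplotlib version where plot_date is still available, as in the code above; the function name, color, and output file name are arbitrary.

import matplotlib.pyplot as plt

def plot_item_history(points, color='#00FF00', title='cpu', output='history.png'):
    """Sketch: draw one curve from a list of (date, value) tuples."""
    x, y = zip(*points)  # split the tuples into two sequences
    plt.clf()
    plt.grid(True)
    plt.plot_date(x, y, fmt='', drawstyle='default', linestyle='-',
                  color=color, xdate=True, ydate=False)
    plt.title(title.capitalize())
    plt.xlabel('Date')
    plt.savefig(output, dpi=72)
    plt.close()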
Example #26
    def generate_graph(self, stats):
        """Generate graphs from plugins history.

        Return the number of output files generated by the function.
        """
        if not self.graph_enabled():
            return 0

        index_all = 0
        for p in stats.getAllPlugins():
            h = stats.get_plugin(p).get_stats_history()
            # Data
            if h is None:
                # History (h) not available for plugin (p)
                continue
            # Init graph
            plt.clf()
            index_graph = 0
            handles = []
            labels = []
            for i in stats.get_plugin(p).get_items_history_list():
                if i['name'] in iterkeys(h):
                    # The key exists
                    # Add the curves in the current chart
                    logger.debug("Generate graph: %s %s" % (p, i['name']))
                    index_graph += 1
                    # Labels
                    handles.append(
                        plt.Rectangle((0, 0),
                                      1,
                                      1,
                                      fc=self.get_graph_color(i),
                                      ec=self.get_graph_color(i),
                                      linewidth=2))
                    labels.append(self.get_graph_legend(i))
                    # Legend
                    plt.ylabel(self.get_graph_yunit(i, pre_label=''))
                    # Curves
                    plt.grid(True)
                    plt.plot_date(h['date'],
                                  h[i['name']],
                                  fmt='',
                                  drawstyle='default',
                                  linestyle='-',
                                  color=self.get_graph_color(i),
                                  xdate=True,
                                  ydate=False)
                    if index_graph == 1:
                        # Title only on top of the first graph
                        plt.title(p.capitalize())
                else:
                    # The key does not exist
                    # Find if another key ends with the key
                    # Ex: key='tx' => 'ethernet_tx'
                    # Add one curve per chart
                    stats_history_filtered = sorted([
                        key for key in iterkeys(h)
                        if key.endswith('_' + i['name'])
                    ])
                    logger.debug("Generate graphs: %s %s" %
                                 (p, stats_history_filtered))
                    if len(stats_history_filtered) > 0:
                        # Create 'n' graphs
                        # Each graph iterates through the stats
                        plt.clf()
                        index_item = 0
                        for k in stats_history_filtered:
                            index_item += 1
                            plt.subplot(len(stats_history_filtered), 1,
                                        index_item)
                            plt.ylabel(self.get_graph_yunit(i, pre_label=k))
                            plt.grid(True)
                            plt.plot_date(h['date'],
                                          h[k],
                                          fmt='',
                                          drawstyle='default',
                                          linestyle='-',
                                          color=self.get_graph_color(i),
                                          xdate=True,
                                          ydate=False)
                            if index_item == 1:
                                # Title only on top of the first graph
                                plt.title(p.capitalize() + ' ' + i['name'])
                        # Save the graph to output file
                        fig = plt.gcf()
                        fig.set_size_inches(20, 5 * index_item)
                        plt.xlabel('Date')
                        plt.savefig(os.path.join(
                            self.output_folder,
                            'glances_%s_%s.png' % (p, i['name'])),
                                    dpi=72)
                        index_all += 1

            if index_graph > 0:
                # Save the graph to output file
                fig = plt.gcf()
                fig.set_size_inches(20, 10)
                plt.legend(handles, labels, loc=1, prop={'size': 9})
                plt.xlabel('Date')
                plt.savefig(os.path.join(self.output_folder,
                                         'glances_%s.png' % p),
                            dpi=72)
                index_all += 1

            plt.close()

        return index_all
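This variant differs from the previous one in how history is laid out: instead of per-item lists of (date, value) tuples, it expects one shared 'date' list plus a flat value list per item name. A hypothetical example of that shape (dates and values invented for illustration):

from datetime import datetime, timedelta

# Hypothetical history dict in the layout this variant consumes:
# a shared 'date' axis and one value list per item name.
start = datetime(2021, 1, 1, 12, 0, 0)
h = {
    'date': [start + timedelta(seconds=3 * i) for i in range(4)],
    'user': [12.0, 30.5, 25.1, 18.2],
    'system': [3.0, 5.5, 4.1, 2.2],
}
# plt.plot_date(h['date'], h['user'], ...) then draws the 'user' curve.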
Example #27
    def msg_curse(self, args=None, max_width=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist...
        if not self.stats:
            return ret

        # Max size for the interface name
        name_max_width = max_width - 12

        # Header
        msg = '{:{width}}'.format('RAID disks', width=name_max_width)
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = '{:>7}'.format('Used')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Avail')
        ret.append(self.curse_add_line(msg))
        # Data
        arrays = sorted(iterkeys(self.stats))
        for array in arrays:
            # New line
            ret.append(self.curse_new_line())
            # Display the current status
            status = self.raid_alert(self.stats[array]['status'],
                                     self.stats[array]['used'],
                                     self.stats[array]['available'],
                                     self.stats[array]['type'])
            # Data: RAID type name | disk used | disk available
            array_type = (self.stats[array]['type'].upper()
                          if self.stats[array]['type'] is not None else 'UNKNOWN')
            # Build the full name = array type + array name
            full_name = '{} {}'.format(array_type, array)
            msg = '{:{width}}'.format(full_name, width=name_max_width)
            ret.append(self.curse_add_line(msg))
            if (self.stats[array]['type'] == 'raid0'
                    and self.stats[array]['status'] == 'active'):
                msg = '{:>7}'.format(len(self.stats[array]['components']))
                ret.append(self.curse_add_line(msg, status))
                msg = '{:>7}'.format('-')
                ret.append(self.curse_add_line(msg, status))
            elif self.stats[array]['status'] == 'active':
                msg = '{:>7}'.format(self.stats[array]['used'])
                ret.append(self.curse_add_line(msg, status))
                msg = '{:>7}'.format(self.stats[array]['available'])
                ret.append(self.curse_add_line(msg, status))
            elif self.stats[array]['status'] == 'inactive':
                ret.append(self.curse_new_line())
                msg = '└─ Status {}'.format(self.stats[array]['status'])
                ret.append(self.curse_add_line(msg, status))
                components = sorted(iterkeys(self.stats[array]['components']))
                for i, component in enumerate(components):
                    if i == len(components) - 1:
                        tree_char = '└─'
                    else:
                        tree_char = '├─'
                    ret.append(self.curse_new_line())
                    msg = '   {} disk {}: '.format(
                        tree_char, self.stats[array]['components'][component])
                    ret.append(self.curse_add_line(msg))
                    msg = '{}'.format(component)
                    ret.append(self.curse_add_line(msg))
            if self.stats[array]['type'] != 'raid0' and (
                    self.stats[array]['used'] <
                    self.stats[array]['available']):
                # Display current array configuration
                ret.append(self.curse_new_line())
                msg = '└─ Degraded mode'
                ret.append(self.curse_add_line(msg, status))
                if len(self.stats[array]['config']) < 17:
                    ret.append(self.curse_new_line())
                    msg = '   └─ {}'.format(
                        self.stats[array]['config'].replace('_', 'A'))
                    ret.append(self.curse_add_line(msg))

        return ret
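The column layout relies on str.format() with a computed field width for the array name and fixed, right-aligned widths for the counters. A tiny standalone illustration; the 25-column plugin width is an arbitrary assumption.

# Build one header row the same way the plugin formats its columns
max_width = 25                   # assumed width of the plugin area
name_max_width = max_width - 12  # room left for the two counters
row = '{:{width}}'.format('RAID disks', width=name_max_width)
row += '{:>7}'.format('Used')
row += '{:>7}'.format('Avail')
print(row)  # name left-aligned, counters right-aligned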
Example #28
    def update(self):
        """Update RAM memory stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        if self.input_method == 'local':
            # Update stats using the standard system lib
            # Grab MEM using the psutil virtual_memory method
            vm_stats = psutil.virtual_memory()

            # Get all the memory stats (copy/paste of the psutil documentation)
            # total: total physical memory available.
            # available: the memory that can be given instantly to processes
            #   without the system going into swap; it is calculated by summing
            #   different memory values depending on the platform (e.g.
            #   free + buffers + cached on Linux) and is the value to use to
            #   monitor actual memory usage in a cross-platform fashion.
            # percent: the percentage usage calculated as
            #   (total - available) / total * 100.
            # used: memory used, calculated differently depending on the
            #   platform and designed for informational purposes only.
            # free: memory not being used at all (zeroed) that is readily
            #   available; note that this does not reflect the actual memory
            #   available (use 'available' instead).
            # Platform-specific fields:
            # active (UNIX): memory currently in use or very recently used,
            #   and so it is in RAM.
            # inactive (UNIX): memory that is marked as not used.
            # buffers (Linux, BSD): cache for things like file system metadata.
            # cached (Linux, BSD): cache for various things.
            # wired (BSD, macOS): memory that is marked to always stay in RAM;
            #   it is never moved to disk.
            # shared (BSD): memory that may be simultaneously accessed by
            #   multiple processes.
            self.reset()
            for mem in ['total', 'available', 'percent', 'used', 'free',
                        'active', 'inactive', 'buffers', 'cached',
                        'wired', 'shared']:
                if hasattr(vm_stats, mem):
                    stats[mem] = getattr(vm_stats, mem)

            # Use the 'free'/htop calculation
            # free = available + buffers + cached
            stats['free'] = stats['available']
            if 'buffers' in stats:
                stats['free'] += stats['buffers']
            if 'cached' in stats:
                stats['free'] += stats['cached']
            # used=total-free
            stats['used'] = stats['total'] - stats['free']
        elif self.input_method == 'snmp':
            # Update stats using SNMP
            if self.short_system_name in ('windows', 'esxi'):
                # Mem stats for Windows and VMware ESXi are stored in the FS table
                try:
                    fs_stat = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name],
                                                  bulk=True)
                except KeyError:
                    self.reset()
                else:
                    for fs in fs_stat:
                        # The Physical Memory (Windows) or Real Memory (VMware)
                        # gives statistics on RAM usage and availability.
                        if fs in ('Physical Memory', 'Real Memory'):
                            stats['total'] = int(fs_stat[fs]['size']) * int(fs_stat[fs]['alloc_unit'])
                            stats['used'] = int(fs_stat[fs]['used']) * int(fs_stat[fs]['alloc_unit'])
                            stats['percent'] = float(stats['used'] * 100 / stats['total'])
                            stats['free'] = stats['total'] - stats['used']
                            break
            else:
                # Default behavior for other OSes
                stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])

                if stats['total'] == '':
                    self.reset()
                    return self.stats

                for key in iterkeys(stats):
                    if stats[key] != '':
                        stats[key] = float(stats[key]) * 1024

                # Use the 'free'/htop calculation
                stats['free'] = stats['free'] - stats['total'] + (stats['buffers'] + stats['cached'])

                # used=total-free
                stats['used'] = stats['total'] - stats['free']

                # percent: the percentage usage, calculated here as (total - free) / total * 100
                stats['percent'] = float((stats['total'] - stats['free']) / stats['total'] * 100)

        # Update the stats
        self.stats = stats

        return self.stats
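The local branch, including the htop-style free/used recalculation, condenses into the sketch below; mem_summary is an illustrative name.

import psutil

def mem_summary():
    """Sketch: RAM stats with the htop-style free/used recalculation."""
    vm = psutil.virtual_memory()
    stats = {field: getattr(vm, field)
             for field in ('total', 'available', 'percent', 'used', 'free',
                           'active', 'inactive', 'buffers', 'cached',
                           'wired', 'shared')
             if hasattr(vm, field)}
    # free = available (+ buffers + cached when the platform exposes them)
    stats['free'] = stats['available']
    stats['free'] += stats.get('buffers', 0)
    stats['free'] += stats.get('cached', 0)
    # used = total - free
    stats['used'] = stats['total'] - stats['free']
    return stats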
Example #29
    def generate_graph(self, stats):
        """Generate graphs from plugins history.

        Return the number of output files generated by the function.
        """
        if not self.graph_enabled():
            return 0

        index_all = 0
        for p in stats.getAllPlugins():
            h = stats.get_plugin(p).get_stats_history()
            # Data
            if h is None:
                # History (h) not available for plugin (p)
                continue
            # Init graph
            plt.clf()
            index_graph = 0
            handles = []
            labels = []
            for i in stats.get_plugin(p).get_items_history_list():
                if i['name'] in iterkeys(h):
                    # The key exists
                    # Add the curves in the current chart
                    logger.debug("Generate graph: %s %s" % (p, i['name']))
                    index_graph += 1
                    # Labels
                    handles.append(plt.Rectangle((0, 0), 1, 1, fc=self.get_graph_color(i), ec=self.get_graph_color(i), linewidth=2))
                    labels.append(self.get_graph_legend(i))
                    # Legend
                    plt.ylabel(self.get_graph_yunit(i, pre_label=''))
                    # Curves
                    plt.grid(True)
                    plt.plot_date(h['date'], h[i['name']],
                                  fmt='', drawstyle='default', linestyle='-',
                                  color=self.get_graph_color(i),
                                  xdate=True, ydate=False)
                    if index_graph == 1:
                        # Title only on top of the first graph
                        plt.title(p.capitalize())
                else:
                    # The key does not exist
                    # Find if another key ends with the key
                    # Ex: key='tx' => 'ethernet_tx'
                    # Add one curve per chart
                    stats_history_filtered = sorted([key for key in iterkeys(h) if key.endswith('_' + i['name'])])
                    logger.debug("Generate graphs: %s %s" %
                                 (p, stats_history_filtered))
                    if len(stats_history_filtered) > 0:
                        # Create 'n' graphs
                        # Each graph iterates through the stats
                        plt.clf()
                        index_item = 0
                        for k in stats_history_filtered:
                            index_item += 1
                            plt.subplot(
                                len(stats_history_filtered), 1, index_item)
                            plt.ylabel(self.get_graph_yunit(i, pre_label=k))
                            plt.grid(True)
                            plt.plot_date(h['date'], h[k],
                                          fmt='', drawstyle='default', linestyle='-',
                                          color=self.get_graph_color(i),
                                          xdate=True, ydate=False)
                            if index_item == 1:
                                # Title only on top of the first graph
                                plt.title(p.capitalize() + ' ' + i['name'])
                        # Save the graph to output file
                        fig = plt.gcf()
                        fig.set_size_inches(20, 5 * index_item)
                        plt.xlabel('Date')
                        plt.savefig(
                            os.path.join(self.output_folder, 'glances_%s_%s.png' % (p, i['name'])), dpi=72)
                        index_all += 1

            if index_graph > 0:
                # Save the graph to output file
                fig = plt.gcf()
                fig.set_size_inches(20, 10)
                plt.legend(handles, labels, loc=1, prop={'size': 9})
                plt.xlabel('Date')
                plt.savefig(os.path.join(self.output_folder, 'glances_%s.png' % p), dpi=72)
                index_all += 1

            plt.close()

        return index_all