Esempio n. 1
0
    def __init__(self, cached_time=1):
        """Initialize the cached CPU-percent holder.

        cached_time: minimum number of seconds between two stats updates;
        within that window the cached value is returned instead.
        """
        # Last computed CPU percentage (0 until the first refresh).
        self.cpu_percent = 0
        self.cached_time = cached_time
        # Already-expired timer so the very first get() forces a refresh.
        self.timer = Timer(0)
Esempio n. 2
0
 def __get_percpu(self):
     """Update and/or return the per CPU list using the psutil library.

     Returns a list of dicts (one per logical CPU) with at least the keys
     'key', 'cpu_number', 'total', 'user', 'system' and 'idle'; optional
     fields (nice, iowait, irq, ...) are added when the platform reports
     them.  Results are cached for ``cached_time`` seconds.
     """
     # Never update more than 1 time per cached_time
     if self.timer_percpu.finished():
         self.percpu_percent = []
         for cpu_number, cputimes in enumerate(
                 psutil.cpu_times_percent(interval=0.0, percpu=True)):
             cpu = {
                 'key': self.get_key(),
                 'cpu_number': cpu_number,
                 'total': round(100 - cputimes.idle, 1),
                 'user': cputimes.user,
                 'system': cputimes.system,
                 'idle': cputimes.idle
             }
             # The following stats are for API purposes only
             # (only present on some platforms).
             for field in ('nice', 'iowait', 'irq', 'softirq',
                           'steal', 'guest', 'guest_nice'):
                 if hasattr(cputimes, field):
                     cpu[field] = getattr(cputimes, field)
             # Append new CPU to the list
             self.percpu_percent.append(cpu)
         # Reset the cache timer ONCE per refresh: the original rebuilt
         # the Timer on every loop iteration and never reset it at all
         # when the per-CPU list was empty.
         self.timer_percpu = Timer(self.cached_time)
     return self.percpu_percent
Esempio n. 3
0
 def get(self):
     """Return the global CPU percentage, refreshed from psutil at most
     once per ``cached_time`` seconds."""
     if self.timer.finished():
         # Cache expired: take a fresh non-blocking reading and rearm.
         self.cpu_percent = psutil.cpu_percent(interval=0.0)
         self.timer = Timer(self.cached_time)
     return self.cpu_percent
Esempio n. 4
0
    def update(self, servers_list):
        """Update the servers' list screen.

        Wait for __refresh_time sec / catch key every 100 ms.

        servers_list: Dict of dict with servers stats
        """
        # Draw the screen once before entering the wait loop.
        self.flush(servers_list)

        countdown = Timer(self.__refresh_time)
        exitkey = False
        # Poll the keyboard every 100 ms until the refresh period
        # elapses or an exit/select key is pressed.
        while not (countdown.finished() or exitkey):
            pressedkey = self.__catch_key(servers_list)
            # ESC, 'q' or Enter leaves the loop.
            exitkey = pressedkey in (ord('\x1b'), ord('q'), 10)
            if pressedkey > -1 and not exitkey:
                # Any other key triggers an immediate redraw.
                self.flush(servers_list)
            curses.napms(100)

        return self.get_active()
Esempio n. 5
0
 def __get_cpu(self):
     """Return the overall CPU percentage (cached for cached_time s)."""
     if self.timer_cpu.finished():
         # Cache has expired: grab a fresh value and rearm the timer.
         self.cpu_percent = psutil.cpu_percent(interval=0.0)
         self.timer_cpu = Timer(self.cached_time)
     return self.cpu_percent
Esempio n. 6
0
    def __init__(self, cached_time=1):
        """Set up cached holders for global and per-CPU percentages.

        cached_time: minimum number of seconds between two psutil calls;
        inside that window cached values are returned instead.
        """
        # Last readings (zero/empty until the first refresh).
        self.cpu_percent = 0
        self.percpu_percent = []
        self.cached_time = cached_time
        # Expired timers so the very first get() triggers a refresh.
        self.timer_cpu = Timer(0)
        self.timer_percpu = Timer(0)
Esempio n. 7
0
    def __init__(self, cached_time=1, config=None):
        """Create the stats server and prime it with a first update.

        cached_time: minimum number of seconds between stats refreshes;
        XML/RPC calls inside that window get cached values back.
        config: optional configuration forwarded to GlancesStatsServer.
        """
        self.stats = GlancesStatsServer(config)
        # Populate the stats once so early RPC calls see real data.
        self.stats.update()
        self.cached_time = cached_time
        # Expired timer: the first client request forces a refresh.
        self.timer = Timer(0)
Esempio n. 8
0
    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes.

        cache_timeout: lifetime (in seconds) of the internal username and
        cmdline caches, needed because psutil does not cache all stats
        (see https://code.google.com/p/psutil/issues/detail?id=462).
        """
        # Internal caches, keyed by pid.
        self.username_cache = {}
        self.cmdline_cache = {}

        # Both caches are flushed every cache_timeout seconds.
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Previous IO counters per pid:
        # {pid: [read_bytes_old, write_bytes_old]}
        self.io_old = {}

        # Process-tree display is off by default.
        self._enable_tree = False
        self.process_tree = None

        # Sorting state and (initially empty) stats containers.
        self.auto_sort = True
        self._sort_key = 'cpu_percent'
        self.allprocesslist = []
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # True disables the processes stats entirely
        # (reduces Glances' own CPU consumption).
        self.disable_tag = False

        # Extended stats for the top process are enabled by default.
        self.disable_extended_tag = False

        # Max number of processes shown in the UI (None = unlimited).
        self._max_processes = None

        # Optional process filter (regular expression) and its compiled form.
        self._process_filter = None
        self._process_filter_re = None

        # Kernel threads are shown by default.
        self.no_kernel_threads = False
Esempio n. 9
0
 def __get_percpu(self):
     """Update and/or return the per CPU list using the psutil library.

     Returns one dict per logical CPU with 'key', 'cpu_number', 'total',
     'user', 'system', 'idle' plus any optional platform-specific fields.
     Results are cached for ``cached_time`` seconds.
     """
     # Never update more than 1 time per cached_time
     if self.timer_percpu.finished():
         self.percpu_percent = []
         for cpu_number, cputimes in enumerate(psutil.cpu_times_percent(interval=0.0, percpu=True)):
             cpu = {'key': self.get_key(),
                    'cpu_number': cpu_number,
                    'total': round(100 - cputimes.idle, 1),
                    'user': cputimes.user,
                    'system': cputimes.system,
                    'idle': cputimes.idle}
             # The following stats are for API purposes only
             # (present only on some platforms).
             for field in ('nice', 'iowait', 'irq', 'softirq',
                           'steal', 'guest', 'guest_nice'):
                 if hasattr(cputimes, field):
                     cpu[field] = getattr(cputimes, field)
             # Append new CPU to the list
             self.percpu_percent.append(cpu)
         # Reset the cache timer once per refresh, not once per CPU:
         # the original re-created the Timer inside the loop and never
         # reset it when the per-CPU list was empty.
         self.timer_percpu = Timer(self.cached_time)
     return self.percpu_percent
Esempio n. 10
0
 def get(self):
     """Return the CPU percentage, refreshing it from psutil no more
     than once per ``cached_time`` seconds."""
     if self.timer.finished():
         # Stale cache: take a non-blocking reading and rearm the timer.
         self.cpu_percent = psutil.cpu_percent(interval=0.0)
         self.timer = Timer(self.cached_time)
     return self.cpu_percent
Esempio n. 11
0
    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes.

        cache_timeout: lifetime (seconds) of the internal username and
        cmdline caches (psutil does not cache every stat, see
        https://code.google.com/p/psutil/issues/detail?id=462).
        """
        # Internal per-pid caches.
        self.username_cache = {}
        self.cmdline_cache = {}

        # Both caches are flushed every cache_timeout seconds.
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Previous IO counters per pid:
        # {pid: [read_bytes_old, write_bytes_old]}
        self.io_old = {}

        # Stats containers; resetsort() initializes the sort state.
        self.resetsort()
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # True disables the processes stats entirely
        # (reduces Glances' own CPU consumption).
        self.disable_tag = False

        # Extended stats for the top process are enabled by default.
        self.disable_extended_tag = False

        # Max number of processes shown in the UI (None = unlimited).
        self.max_processes = None

        # Optional process filter regex and its compiled form.
        self.process_filter = None
        self.process_filter_re = None
Esempio n. 12
0
 def __get_cpu(self):
     """Return the global CPU percentage, cached for cached_time seconds."""
     if self.timer_cpu.finished():
         # Refresh the cached value and rearm the cache timer.
         self.cpu_percent = psutil.cpu_percent(interval=0.0)
         self.timer_cpu = Timer(self.cached_time)
     return self.cpu_percent
Esempio n. 13
0
    def __init__(self, cached_time=1, config=None):
        """Build the stats server and run an initial update.

        cached_time: minimum number of seconds between stats refreshes;
        XML/RPC calls within that window receive cached values.
        config: optional configuration forwarded to GlancesStatsServer.
        """
        self.stats = GlancesStatsServer(config)
        # Prime the stats so the first RPC call sees real data.
        self.stats.update()
        self.cached_time = cached_time
        # Expired timer: the next request always triggers a refresh.
        self.timer = Timer(0)
Esempio n. 14
0
    def update(self, stats, cs_status="None"):
        """Update the screen.

        Wait for __refresh_time sec / catch key every 100 ms.

        stats: Stats database to display
        cs_status:
            "None": standalone or server mode
            "Connected": Client is connected to the server
            "Disconnected": Client is disconnected from the server
        """
        # Paint the screen once up front.
        self.flush(stats, cs_status=cs_status)

        # Then poll the keyboard every 100 ms until the refresh
        # period has elapsed.
        countdown = Timer(self.__refresh_time)
        while not countdown.finished():
            pressedkey = self.__catch_key()
            if pressedkey > -1:
                # A key was hit: redraw immediately.
                self.flush(stats, cs_status=cs_status)
            curses.napms(100)
Esempio n. 15
0
    def update(self, stats, cs_status="None"):
        """Update the screen.

        Wait for __refresh_time sec / catch key every 100 ms.

        stats: Stats database to display
        cs_status:
            "None": standalone or server mode
            "Connected": Client is connected to the server
            "Disconnected": Client is disconnected from the server
        """
        # Initial draw.
        self.flush(stats, cs_status=cs_status)

        # Poll the keyboard every 100 ms until refresh time is reached.
        countdown = Timer(self.__refresh_time)
        while not countdown.finished():
            if self.__catch_key() > -1:
                # Key pressed: refresh the display right away.
                self.flush(stats, cs_status=cs_status)
            curses.napms(100)
Esempio n. 16
0
    def update(self, stats, cs_status="None", return_to_browser=False):
        """Update the screen.

        Wait for __refresh_time sec / catch key every 100 ms.

        INPUT
        stats: Stats database to display
        cs_status:
            "None": standalone or server mode
            "Connected": Client is connected to the server
            "Disconnected": Client is disconnected from the server
        return_to_browser:
            True: Do not exit, return to the browser list
            False: Exit and return to the shell

        OUTPUT
        True: Exit key has been pressed
        False: Others cases...
        """
        # Initial draw.
        self.flush(stats, cs_status=cs_status)

        countdown = Timer(self.__refresh_time)
        exitkey = False
        # Poll the keyboard every 100 ms until the refresh period ends
        # or an exit key is pressed.
        while not (countdown.finished() or exitkey):
            pressedkey = self.__catch_key(return_to_browser=return_to_browser)
            # ESC or 'q' leaves the loop.
            exitkey = pressedkey in (ord('\x1b'), ord('q'))
            if pressedkey > -1 and not exitkey:
                # Any other key: redraw right away.
                self.flush(stats, cs_status=cs_status)
            curses.napms(100)

        return exitkey
Esempio n. 17
0
class CpuPercent(object):

    """Get and store the CPU percent."""

    def __init__(self, cached_time=1):
        """cached_time: minimum number of seconds between two psutil
        readings; within that window get() returns the cached value."""
        self.cpu_percent = 0
        self.cached_time = cached_time
        # Expired timer: the first get() always refreshes.
        self.timer = Timer(0)

    def get(self):
        """Return the CPU percentage, refreshed from psutil at most
        once per cached_time seconds."""
        if self.timer.finished():
            self.cpu_percent = psutil.cpu_percent(interval=0.0)
            # Rearm the cache timer.
            self.timer = Timer(self.cached_time)
        return self.cpu_percent
Esempio n. 18
0
    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes.

        cache_timeout: lifetime (seconds) of the internal username and
        cmdline caches, which exist because psutil does not cache every
        stat (https://code.google.com/p/psutil/issues/detail?id=462).
        """
        # Internal caches, keyed by pid.
        self.username_cache = {}
        self.cmdline_cache = {}

        # Both caches are flushed every cache_timeout seconds.
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Previous IO counters per pid:
        # {pid: [read_bytes_old, write_bytes_old]}
        self.io_old = {}

        # Process-tree display is off by default.
        self._enable_tree = False
        self.process_tree = None

        # Sorting state and (initially empty) stats containers.
        self.auto_sort = True
        self._sort_key = "cpu_percent"
        self.allprocesslist = []
        self.processlist = []
        self.processcount = {"total": 0, "running": 0, "sleeping": 0, "thread": 0}

        # True disables the processes stats entirely
        # (reduces Glances' own CPU consumption).
        self.disable_tag = False

        # Extended stats for the top process are enabled by default.
        self.disable_extended_tag = False

        # Max number of processes shown in the UI (None = unlimited).
        self._max_processes = None

        # Optional process filter regex and its compiled form.
        self._process_filter = None
        self._process_filter_re = None

        # Kernel threads are shown by default.
        self.no_kernel_threads = False
Esempio n. 19
0
class CpuPercent(object):

    """Get and store the CPU percent."""

    def __init__(self, cached_time=1):
        """cached_time: minimum seconds between two psutil readings;
        inside that window get() serves the cached value."""
        self.cpu_percent = 0
        self.cached_time = cached_time
        # Already expired, so the first get() triggers a real reading.
        self.timer = Timer(0)

    def get(self):
        """Return the CPU percentage, updating it from psutil no more
        than once per cached_time seconds."""
        if self.timer.finished():
            # Refresh the cached value and rearm the timer.
            self.cpu_percent = psutil.cpu_percent(interval=0.0)
            self.timer = Timer(self.cached_time)
        return self.cpu_percent
Esempio n. 20
0
    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes.

        cache_timeout: lifetime (seconds) of the internal username and
        cmdline caches (psutil does not cache every stat, see
        https://code.google.com/p/psutil/issues/detail?id=462).
        """
        # Internal per-pid caches.
        self.username_cache = {}
        self.cmdline_cache = {}

        # Both caches are flushed every cache_timeout seconds.
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Previous IO counters per pid:
        # {pid: [read_bytes_old, write_bytes_old]}
        self.io_old = {}

        # Default sort key and (initially empty) stats containers.
        self.processsort = 'cpu_percent'
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # True disables the processes stats entirely
        # (reduces Glances' own CPU consumption).
        self.disable_tag = False
Esempio n. 21
0
class CpuPercent(object):

    """Get and store the CPU percent (global and per CPU)."""

    def __init__(self, cached_time=1):
        """cached_time: minimum number of seconds between two psutil
        readings; within that window cached values are returned."""
        self.cpu_percent = 0
        self.percpu_percent = []

        # Expired timers: the very first get() triggers a refresh.
        self.timer_cpu = Timer(0)
        self.timer_percpu = Timer(0)
        self.cached_time = cached_time

    def get_key(self):
        """Return the key of the per CPU list."""
        return 'cpu_number'

    def get(self, percpu=False):
        """Update and/or return the CPU stats using the psutil library.

        If percpu is True, return the per-CPU list, else the global value.
        """
        if percpu:
            return self.__get_percpu()
        return self.__get_cpu()

    def __get_cpu(self):
        """Update and/or return the global CPU percent."""
        # Never update more than 1 time per cached_time
        if self.timer_cpu.finished():
            self.cpu_percent = psutil.cpu_percent(interval=0.0)
            # Reset timer for cache
            self.timer_cpu = Timer(self.cached_time)
        return self.cpu_percent

    def __get_percpu(self):
        """Update and/or return the per CPU list using the psutil library."""
        # Never update more than 1 time per cached_time
        if self.timer_percpu.finished():
            self.percpu_percent = []
            for cpu_number, cputimes in enumerate(
                    psutil.cpu_times_percent(interval=0.0, percpu=True)):
                cpu = {'key': self.get_key(),
                       'cpu_number': cpu_number,
                       'total': round(100 - cputimes.idle, 1),
                       'user': cputimes.user,
                       'system': cputimes.system,
                       'idle': cputimes.idle}
                # The following stats are for API purposes only
                # (present only on some platforms).
                for field in ('nice', 'iowait', 'irq', 'softirq',
                              'steal', 'guest', 'guest_nice'):
                    if hasattr(cputimes, field):
                        cpu[field] = getattr(cputimes, field)
                # Append new CPU to the list
                self.percpu_percent.append(cpu)
            # Reset the cache timer once per refresh: the original
            # re-created the Timer on every loop iteration and never
            # reset it when the per-CPU list was empty.
            self.timer_percpu = Timer(self.cached_time)
        return self.percpu_percent
Esempio n. 22
0
 def __update__(self):
     """Refresh the stats, at most once per cached_time seconds."""
     if self.timer.finished():
         self.stats.update()
         # Rearm the cache timer.
         self.timer = Timer(self.cached_time)
Esempio n. 23
0
class GlancesProcesses(object):

    """Get processed stats using the psutil library."""

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes.

        cache_timeout: lifetime (in seconds) of the internal username and
        cmdline caches, needed because psutil does not cache all the stats
        (see https://code.google.com/p/psutil/issues/detail?id=462).
        """
        # Internal caches, keyed by pid.
        self.username_cache = {}
        self.cmdline_cache = {}

        # The internals caches will be cleaned each 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Init the io dict
        # key = pid
        # value = [ read_bytes_old, write_bytes_old ]
        self.io_old = {}

        # Init stats
        self.processsort = 'cpu_percent'
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
        # Default is to enable the processes stats
        self.disable_tag = False

    def enable(self):
        """Enable process stats and refresh them immediately."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats (update() becomes a no-op)."""
        self.disable_tag = True

    def __get_process_stats(self, proc):
        """Build and return the stats dict for one psutil process.

        May raise psutil.NoSuchProcess / psutil.AccessDenied; both are
        handled by the caller (update()).
        """
        procstat = {}

        # Process ID
        procstat['pid'] = proc.pid

        # Process name (cached by PSUtil)
        try:
            procstat['name'] = proc.name()
        except psutil.AccessDenied:
            procstat['name'] = ""

        # Process username (cached with internal cache)
        # NOTE: was a bare 'except:' which also swallowed SystemExit and
        # KeyboardInterrupt; the lookup can only raise KeyError here.
        try:
            self.username_cache[procstat['pid']]
        except KeyError:
            # Not cached yet: resolve it, falling back to the real uid
            # and finally to "?" when the information is unavailable.
            try:
                self.username_cache[procstat['pid']] = proc.username()
            except (KeyError, psutil.AccessDenied):
                try:
                    self.username_cache[procstat['pid']] = proc.uids().real
                except (KeyError, AttributeError, psutil.AccessDenied):
                    self.username_cache[procstat['pid']] = "?"
        procstat['username'] = self.username_cache[procstat['pid']]

        # Process command line (cached with internal cache)
        # NOTE: narrowed from a bare 'except:' for the same reason.
        try:
            self.cmdline_cache[procstat['pid']]
        except KeyError:
            self.cmdline_cache[procstat['pid']] = ' '.join(proc.cmdline())
        procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

        # Process status (first letter, upper-cased: R, S, ...)
        procstat['status'] = str(proc.status())[:1].upper()

        # Process nice
        try:
            procstat['nice'] = proc.nice()
        except psutil.AccessDenied:
            procstat['nice'] = None

        # Process memory
        procstat['memory_info'] = proc.memory_info()
        procstat['memory_percent'] = proc.memory_percent()

        # Process CPU
        procstat['cpu_times'] = proc.cpu_times()
        procstat['cpu_percent'] = proc.cpu_percent(interval=0)

        # Process network connections (TCP and UDP) (Experimental)
        # !!! High CPU consumption
        # try:
        #     procstat['tcp'] = len(proc.connections(kind="tcp"))
        #     procstat['udp'] = len(proc.connections(kind="udp"))
        # except:
        #     procstat['tcp'] = 0
        #     procstat['udp'] = 0

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        # Note Disk IO stat not available on Mac OS
        if not is_mac:
            try:
                # Get the process IO counters
                proc_io = proc.io_counters()
                io_new = [proc_io.read_bytes, proc_io.write_bytes]
            except psutil.AccessDenied:
                # Access denied to process IO (no root account)
                # Put 0 in all values (for sort) and io_tag = 0 (for display)
                procstat['io_counters'] = [0, 0] + [0, 0]
                io_tag = 0
            else:
                # For IO rate computation
                # Append saved IO r/w bytes
                try:
                    procstat['io_counters'] = io_new + self.io_old[procstat['pid']]
                except KeyError:
                    procstat['io_counters'] = io_new + [0, 0]
                # then save the IO r/w bytes
                self.io_old[procstat['pid']] = io_new
                io_tag = 1

            # Append the IO tag (for display)
            procstat['io_counters'] += [io_tag]

        return procstat

    def update(self):
        """Update the processes stats."""
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # For each existing process...
        for proc in psutil.process_iter():
            try:
                # Get stats using the PSUtil
                procstat = self.__get_process_stats(proc)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # ignore the 'idle' process on Windows and *BSD
                # ignore the 'kernel_task' process on OS X
                # waiting for upstream patch from psutil
                if (is_bsd and procstat['name'] == 'idle' or
                        is_windows and procstat['name'] == 'System Idle Process' or
                        is_mac and procstat['name'] == 'kernel_task'):
                    continue
                # Update processcount (global statistics)
                try:
                    self.processcount[str(proc.status())] += 1
                except KeyError:
                    # Key did not exist, create it
                    self.processcount[str(proc.status())] = 1
                else:
                    # NOTE(review): 'total' is only incremented when the
                    # status key already existed; kept as-is to preserve
                    # the original counting behaviour — confirm intent.
                    self.processcount['total'] += 1
                # Update thread number (global statistics)
                # NOTE: narrowed from a bare 'except:'; num_threads() may
                # fail (zombie process / access denied) — skip silently.
                try:
                    self.processcount['thread'] += proc.num_threads()
                except Exception:
                    pass
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue
            else:
                # Update processlist
                self.processlist.append(procstat)

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getlist(self, sortedby=None):
        """Get the processlist (note: 'sortedby' is currently ignored)."""
        return self.processlist

    def getsortkey(self):
        """Get the current sort key for automatic sort."""
        return self.processsort

    def setsortkey(self, sortedby):
        """Set and return the current sort key for automatic sort."""
        self.processsort = sortedby
        return self.processsort

    def getsortlist(self, sortedby=None):
        """Sort self.processlist in place by 'sortedby' and return it.

        'name' sorts ascending, everything else descending.  For
        'io_counters' the key is the IO rate (read + write deltas), with
        a fallback to 'cpu_percent' when the IO values are unusable.
        """
        if sortedby is None:
            # No need to sort...
            return self.processlist

        sortedreverse = True
        if sortedby == 'name':
            sortedreverse = False

        if sortedby == 'io_counters':
            # Specific case for io_counters
            # Sum of io_r + io_w
            try:
                # Sort process by IO rate (sum IO read + IO write)
                listsorted = sorted(self.processlist,
                                    key=lambda process: process[sortedby][0] -
                                    process[sortedby][2] + process[sortedby][1] -
                                    process[sortedby][3],
                                    reverse=sortedreverse)
            except Exception:
                listsorted = sorted(self.processlist,
                                    key=lambda process: process['cpu_percent'],
                                    reverse=sortedreverse)
        else:
            # Others sorts
            listsorted = sorted(self.processlist,
                                key=lambda process: process[sortedby],
                                reverse=sortedreverse)

        self.processlist = listsorted

        return self.processlist
Esempio n. 24
0
class GlancesProcesses(object):
    """Get processed stats using the psutil library."""
    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes."""
        # psutil does not cache every stat, so keep our own caches
        # (see https://code.google.com/p/psutil/issues/detail?id=462)
        self.username_cache = {}
        self.cmdline_cache = {}

        # Both caches are flushed every 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Previous IO counters, per pid:
        # {pid: [read_bytes_old, write_bytes_old]}
        self.io_old = {}

        # Process tree view (disabled by default)
        self._enable_tree = False
        self.process_tree = None

        # Stats holders
        self.resetsort()
        self.processlist = []
        self.processcount = {'total': 0,
                             'running': 0,
                             'sleeping': 0,
                             'thread': 0}

        # True disables the whole process stats collection
        # (reduces the Glances CPU consumption)
        self.disable_tag = False

        # Extended stats for the top process (enabled by default)
        self.disable_extended_tag = False

        # Maximum number of processes shown in the UI (None: no limit)
        self.max_processes = None

        # Process filter: regex string and its compiled form
        self.process_filter = None
        self.process_filter_re = None

        # Hide kernel threads from the process list?
        self.no_kernel_threads = False

    def enable(self):
        """Turn the process stats collection back on and refresh once."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Turn the extended process stats back on and refresh once."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    def set_max_processes(self, value):
        """Set the maximum number of processes showed in the UI interfaces"""
        self.max_processes = value
        return self.max_processes

    def get_max_processes(self):
        """Get the maximum number of processes showed in the UI interfaces"""
        return self.max_processes

    def set_process_filter(self, value):
        """Set the process filter (a regex string, or None to disable it).

        Compile the regex into self.process_filter_re (None when
        compilation fails or filtering is disabled).
        Return the stored filter string.
        """
        logger.info("Set process filter to {0}".format(value))
        self.process_filter = value
        if value is not None:
            try:
                self.process_filter_re = re.compile(value)
                logger.debug("Process filter regex compilation OK: {0}".format(
                    self.get_process_filter()))
            except (re.error, TypeError):
                # Catch only what re.compile can actually raise instead of
                # the previous broad `except Exception`, which would also
                # mask unrelated failures (e.g. in the logging call chain)
                logger.error(
                    "Cannot compile process filter regex: {0}".format(value))
                self.process_filter_re = None
        else:
            self.process_filter_re = None
        return self.process_filter

    def get_process_filter(self):
        """Get the process filter"""
        return self.process_filter

    def get_process_filter_re(self):
        """Get the process regular expression compiled"""
        return self.process_filter_re

    def is_filtered(self, value):
        """Return True if the value should be filtered"""
        if self.get_process_filter() is None:
            # No filter => Not filtered
            return False
        else:
            # logger.debug(self.get_process_filter() + " <> " + value + " => " + str(self.get_process_filter_re().match(value) is None))
            return self.get_process_filter_re().match(value) is None

    def disable_kernel_threads(self):
        """ Ignore kernel threads in process list. """
        self.no_kernel_threads = True

    def enable_tree(self):
        """ Enable process tree. """
        self._enable_tree = True

    def is_tree_enabled(self):
        """ Return True if process tree is enabled, False instead. """
        return self._enable_tree

    def __get_process_stats(self,
                            proc,
                            mandatory_stats=True,
                            standard_stats=True,
                            extended_stats=False):
        """
        Get process stats of the proc processes (proc is returned psutil.process_iter())
        mandatory_stats: need for the sorting/filter step
        => cpu_percent, memory_percent, io_counters, name, cmdline
        standard_stats: for all the displayed processes
        => username, status, memory_info, cpu_times
        extended_stats: only for top processes (see issue #403)
        => connections (UDP/TCP), memory_swap...

        Return the stats dict, or None when the process vanished or its
        basic cpu_percent/memory_percent stats could not be read.
        """

        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat['mandatory_stats'] = True

            # Process CPU, MEM percent and name
            try:
                procstat.update(
                    proc.as_dict(attrs=[
                        'cpu_percent', 'memory_percent', 'name', 'cpu_times'
                    ],
                                 ad_value=''))
            except psutil.NoSuchProcess:
                # Correct issue #414
                return None
            if procstat['cpu_percent'] == '' or procstat[
                    'memory_percent'] == '':
                # Do not display process if we cannot get the basic
                # cpu_percent or memory_percent stats
                # ('' is the ad_value used above for denied attributes)
                return None

            # Process command line (cached with internal cache)
            try:
                self.cmdline_cache[procstat['pid']]
            except KeyError:
                # Patch for issue #391
                try:
                    self.cmdline_cache[procstat['pid']] = ' '.join(
                        proc.cmdline())
                except (AttributeError, UnicodeDecodeError,
                        psutil.AccessDenied, psutil.NoSuchProcess):
                    self.cmdline_cache[procstat['pid']] = ""
            procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

            # Process IO
            # procstat['io_counters'] is a list:
            # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
            # If io_tag = 0 > Access denied (display "?")
            # If io_tag = 1 > No access denied (display the IO rate)
            # Note Disk IO stat not available on Mac OS
            if not is_mac:
                try:
                    # Get the process IO counters
                    proc_io = proc.io_counters()
                    io_new = [proc_io.read_bytes, proc_io.write_bytes]
                except (psutil.AccessDenied, psutil.NoSuchProcess):
                    # Access denied to process IO (no root account)
                    # NoSuchProcess (process die between first and second grab)
                    # Put 0 in all values (for sort) and io_tag = 0 (for
                    # display)
                    procstat['io_counters'] = [0, 0] + [0, 0]
                    io_tag = 0
                else:
                    # For IO rate computation
                    # Append saved IO r/w bytes
                    try:
                        procstat['io_counters'] = io_new + \
                            self.io_old[procstat['pid']]
                    except KeyError:
                        # First grab for this pid: no old values yet
                        procstat['io_counters'] = io_new + [0, 0]
                    # then save the IO r/w bytes
                    self.io_old[procstat['pid']] = io_new
                    io_tag = 1

                # Append the IO tag (for display)
                procstat['io_counters'] += [io_tag]

        if standard_stats:
            procstat['standard_stats'] = True

            # Process username (cached with internal cache)
            try:
                self.username_cache[procstat['pid']]
            except KeyError:
                try:
                    self.username_cache[procstat['pid']] = proc.username()
                except psutil.NoSuchProcess:
                    self.username_cache[procstat['pid']] = "?"
                except (KeyError, psutil.AccessDenied):
                    # Fall back to the real uid when the name lookup fails
                    try:
                        self.username_cache[procstat['pid']] = proc.uids().real
                    except (KeyError, AttributeError, psutil.AccessDenied):
                        self.username_cache[procstat['pid']] = "?"
            procstat['username'] = self.username_cache[procstat['pid']]

            # Process status, nice, memory_info and cpu_times
            try:
                procstat.update(
                    proc.as_dict(
                        attrs=['status', 'nice', 'memory_info', 'cpu_times']))
            except psutil.NoSuchProcess:
                pass
            else:
                # Keep only the first letter of the status, upper-cased
                procstat['status'] = str(procstat['status'])[:1].upper()

        if extended_stats and not self.disable_extended_tag:
            procstat['extended_stats'] = True

            # CPU affinity (Windows and Linux only)
            try:
                procstat.update(proc.as_dict(attrs=['cpu_affinity']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['cpu_affinity'] = None
            # Memory extended
            try:
                procstat.update(proc.as_dict(attrs=['memory_info_ex']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['memory_info_ex'] = None
            # Number of context switch
            try:
                procstat.update(proc.as_dict(attrs=['num_ctx_switches']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['num_ctx_switches'] = None
            # Number of file descriptors (Unix only)
            try:
                procstat.update(proc.as_dict(attrs=['num_fds']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['num_fds'] = None
            # Threads number
            try:
                procstat.update(proc.as_dict(attrs=['num_threads']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['num_threads'] = None

            # Number of handles (Windows only)
            if is_windows:
                try:
                    procstat.update(proc.as_dict(attrs=['num_handles']))
                except psutil.NoSuchProcess:
                    pass
            else:
                procstat['num_handles'] = None

            # SWAP memory (Only on Linux based OS)
            # http://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
            if is_linux:
                try:
                    procstat['memory_swap'] = sum(
                        [v.swap for v in proc.memory_maps()])
                except psutil.NoSuchProcess:
                    pass
                except psutil.AccessDenied:
                    procstat['memory_swap'] = None
                except Exception:
                    # Add a dirty except to handle the PsUtil issue #413
                    procstat['memory_swap'] = None

            # Process network connections (TCP and UDP)
            try:
                procstat['tcp'] = len(proc.connections(kind="tcp"))
                procstat['udp'] = len(proc.connections(kind="udp"))
            except Exception:
                # On any failure (e.g. access denied) disable both counters
                procstat['tcp'] = None
                procstat['udp'] = None

            # IO Nice
            # http://pythonhosted.org/psutil/#psutil.Process.ionice
            if is_linux or is_windows:
                try:
                    procstat.update(proc.as_dict(attrs=['ionice']))
                except psutil.NoSuchProcess:
                    pass
            else:
                procstat['ionice'] = None

            # logger.debug(procstat)

        return procstat

    def update(self):
        """
        Update the processes stats

        Refresh self.processlist and self.processcount from psutil.
        No-op (beyond resetting the stats) when the disable tag is set.
        Builds the process tree instead of the flat list when tree mode
        is enabled.
        """
        # Reset the stats
        self.processlist = []
        self.processcount = {
            'total': 0,
            'running': 0,
            'sleeping': 0,
            'thread': 0
        }

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only mandatories stats (sort keys)
        processdict = {}
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if (self.no_kernel_threads and (not is_windows)
                    and is_kernel_thread(proc)):
                continue

            # If self.get_max_processes() is None: Only retreive mandatory stats
            # Else: retreive mandatory and standard stats
            s = self.__get_process_stats(
                proc,
                mandatory_stats=True,
                standard_stats=self.get_max_processes() is None)
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline'])
                             and self.is_filtered(s['name'])):
                continue
            # Ok add the process to the list
            processdict[proc] = s
            # ignore the 'idle' process on Windows and *BSD
            # ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            # NOTE(review): the process was already added to processdict
            # above, so this 'continue' only skips the counter updates
            # below -- confirm this is intended
            if (is_bsd and processdict[proc]['name'] == 'idle' or is_windows
                    and processdict[proc]['name'] == 'System Idle Process'
                    or is_mac and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                # NOTE(review): 'total' is only incremented on the
                # no-exception path, so a process that creates a new
                # status key above is not counted in 'total' -- confirm
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                pass

        if self._enable_tree:
            # Tree mode: build the hierarchy instead of the flat list
            self.process_tree = ProcessTreeNode.build_tree(
                processdict, self.getsortkey(), self.no_kernel_threads)

            for i, node in enumerate(self.process_tree):
                # Only retreive stats for visible processes (get_max_processes)
                if (self.get_max_processes()
                        is not None) and (i >= self.get_max_processes()):
                    break

                # add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)

                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update

        else:
            # Process optimization
            # Only retreive stats for visible processes (get_max_processes)
            if self.get_max_processes() is not None:
                # Sort the internal dict and cut the top N (Return a list of tuple)
                # tuple=key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(processdict.items(),
                                         key=lambda x: x[1][self.getsortkey()],
                                         reverse=True)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by %s (%s)" %
                                 (self.getsortkey(), e))
                    # NOTE(review): dict.items() is not indexable on
                    # Python 3 -- this line would raise TypeError there;
                    # confirm this module targets Python 2 only
                    logger.error("%s" % str(processdict.items()[0]))
                    # Fallback to all process (issue #423)
                    processloop = processdict.items()
                    first = False
                else:
                    processloop = processiter[0:self.get_max_processes()]
                    first = True
            else:
                # Get all processes stats
                processloop = processdict.items()
                first = False
            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.get_max_processes() is not None:
                    # Update with standard stats
                    # and extended stats but only for TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Clean internals caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getlist(self, sortedby=None):
        """Get the processlist."""
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    def getsortkey(self):
        """Get the current sort key"""
        if self.getmanualsortkey() is not None:
            return self.getmanualsortkey()
        else:
            return self.getautosortkey()

    def getmanualsortkey(self):
        """Get the current sort key for manual sort."""
        return self.processmanualsort

    def getautosortkey(self):
        """Get the current sort key for automatic sort."""
        return self.processautosort

    def setmanualsortkey(self, sortedby):
        """Set the current sort key for manual sort."""
        self.processmanualsort = sortedby
        if self._enable_tree and (self.process_tree is not None):
            self.process_tree.set_sorting(sortedby, sortedby != "name")
        return self.processmanualsort

    def setautosortkey(self, sortedby):
        """Set the current sort key for automatic sort."""
        self.processautosort = sortedby
        return self.processautosort

    def resetsort(self):
        """Set the default sort: Auto"""
        self.setmanualsortkey(None)
        self.setautosortkey('cpu_percent')

    def getsortlist(self, sortedby=None):
        """Get the sorted processlist."""
        if sortedby is None:
            # No need to sort...
            return self.processlist

        sortedreverse = True
        if sortedby == 'name':
            sortedreverse = False

        if sortedby == 'io_counters':
            # Specific case for io_counters
            # Sum of io_r + io_w
            try:
                # Sort process by IO rate (sum IO read + IO write)
                self.processlist.sort(key=lambda process: process[sortedby][
                    0] - process[sortedby][2] + process[sortedby][1] - process[
                        sortedby][3],
                                      reverse=sortedreverse)
            except Exception:
                self.processlist.sort(key=operator.itemgetter('cpu_percent'),
                                      reverse=sortedreverse)
        else:
            # Others sorts
            self.processlist.sort(key=operator.itemgetter(sortedby),
                                  reverse=sortedreverse)

        return self.processlist
Esempio n. 25
0
class CpuPercent(object):
    """Get and store the CPU percent (total and per CPU)."""

    def __init__(self, cached_time=1):
        # Last known values, refreshed at most once per cached_time
        self.cpu_percent = 0
        self.percpu_percent = []

        # cached_time is the minimum time interval between stats updates
        # (until it has passed since the last update, the old cached
        # values are returned instead)
        self.timer_cpu = Timer(0)
        self.timer_percpu = Timer(0)
        self.cached_time = cached_time

    def get_key(self):
        """Return the key of the per CPU list."""
        return 'cpu_number'

    def get(self, percpu=False):
        """Update and/or return the CPU stats using the psutil library.

        If percpu is True, return the per-CPU list instead of the
        global percentage.
        """
        if percpu:
            return self.__get_percpu()
        return self.__get_cpu()

    def __get_cpu(self):
        """Update and/or return the global CPU percent."""
        # Never update more than 1 time per cached_time
        if self.timer_cpu.finished():
            self.cpu_percent = psutil.cpu_percent(interval=0.0)
            # Reset timer for cache
            self.timer_cpu = Timer(self.cached_time)
        return self.cpu_percent

    def __get_percpu(self):
        """Update and/or return the per CPU list."""
        # Never update more than 1 time per cached_time
        if self.timer_percpu.finished():
            self.percpu_percent = []
            for cpu_number, cputimes in enumerate(
                    psutil.cpu_times_percent(interval=0.0, percpu=True)):
                cpu = {
                    'key': self.get_key(),
                    'cpu_number': cpu_number,
                    'total': round(100 - cputimes.idle, 1),
                    'user': cputimes.user,
                    'system': cputimes.system,
                    'idle': cputimes.idle
                }
                # The following stats are for API purposes only
                # (not every platform exposes all of them)
                for stat in ('nice', 'iowait', 'irq', 'softirq',
                             'steal', 'guest', 'guest_nice'):
                    if hasattr(cputimes, stat):
                        cpu[stat] = getattr(cputimes, stat)
                # Append new CPU to the list
                self.percpu_percent.append(cpu)
            # BUGFIX: reset the cache timer once per refresh; the
            # original code re-created the Timer inside the per-CPU
            # loop (loop-invariant work, now hoisted out)
            self.timer_percpu = Timer(self.cached_time)
        return self.percpu_percent
Esempio n. 26
0
class GlancesProcesses(object):

    """Get processed stats using the psutil library."""

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes."""
        # Internal caches, since psutil does not cache all the stats
        # (see https://code.google.com/p/psutil/issues/detail?id=462)
        self.username_cache = {}
        self.cmdline_cache = {}

        # Both caches are flushed every 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Previous IO counters, per pid:
        # {pid: [read_bytes_old, write_bytes_old]}
        self.io_old = {}

        # Stats holders
        self.resetsort()
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0,
                             'sleeping': 0, 'thread': 0}

        # True disables the whole process stats collection
        # (reduces the Glances CPU consumption)
        self.disable_tag = False

        # Extended stats for the top process (enabled by default)
        self.disable_extended_tag = False

        # Maximum number of processes shown in the UI (None: no limit)
        self.max_processes = None

        # Process filter: regex string and its compiled form
        self.process_filter = None
        self.process_filter_re = None

    def enable(self):
        """Re-enable the process stats collection and refresh once."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Re-enable the extended process stats and refresh once."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    def set_max_processes(self, value):
        """Set the maximum number of processes showed in the UI interfaces"""
        self.max_processes = value
        return self.max_processes

    def get_max_processes(self):
        """Get the maximum number of processes showed in the UI interfaces"""
        return self.max_processes

    def set_process_filter(self, value):
        """Set the process filter (a regex string, or None to disable it).

        Compile the regex into self.process_filter_re (None when
        compilation fails or filtering is disabled).
        Return the stored filter string.
        """
        logger.info(_("Set process filter to %s") % value)
        self.process_filter = value
        if value is not None:
            try:
                self.process_filter_re = re.compile(value)
                logger.debug(_("Process filter regular expression compilation OK: %s") % self.get_process_filter())
            except (re.error, TypeError):
                # Was a bare 'except:', which also swallows
                # KeyboardInterrupt/SystemExit; catch only the errors
                # re.compile can actually raise
                logger.error(_("Can not compile process filter regular expression: %s") % value)
                self.process_filter_re = None
        else:
            self.process_filter_re = None
        return self.process_filter

    def get_process_filter(self):
        """Get the process filter"""
        return self.process_filter

    def get_process_filter_re(self):
        """Get the process regular expression compiled"""
        return self.process_filter_re

    def is_filtered(self, value):
        """Return True if the value should be filtered"""
        if self.get_process_filter() is None:
            # No filter => Not filtered
            return False
        else:
            # logger.debug(self.get_process_filter() + " <> " + value + " => " + str(self.get_process_filter_re().match(value) is None))
            return self.get_process_filter_re().match(value) is None

    def __get_process_stats(self, proc,
                            mandatory_stats=True,
                            standard_stats=True, 
                            extended_stats=False):
        """
        Get process stats of the proc processes (proc is returned psutil.process_iter())
        mandatory_stats: need for the sorting/filter step
        => cpu_percent, memory_percent, io_counters, name, cmdline
        standard_stats: for all the displayed processes
        => username, status, memory_info, cpu_times
        extended_stats: only for top processes (see issue #403)
        => connections (UDP/TCP), memory_swap...
        """

        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat['mandatory_stats'] = True 

            # Process CPU, MEM percent and name
            procstat.update(proc.as_dict(attrs=['cpu_percent', 'memory_percent', 'name'], ad_value=''))

            # Process command line (cached with internal cache)
            try:
                self.cmdline_cache[procstat['pid']]
            except KeyError:
                # Patch for issue #391
                try:
                    self.cmdline_cache[procstat['pid']] = ' '.join(proc.cmdline())
                except (AttributeError, psutil.AccessDenied, UnicodeDecodeError):
                    self.cmdline_cache[procstat['pid']] = ""
            procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

            # Process IO
            # procstat['io_counters'] is a list:
            # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
            # If io_tag = 0 > Access denied (display "?")
            # If io_tag = 1 > No access denied (display the IO rate)
            # Note Disk IO stat not available on Mac OS
            if not is_mac:
                try:
                    # Get the process IO counters
                    proc_io = proc.io_counters()
                    io_new = [proc_io.read_bytes, proc_io.write_bytes]
                except (psutil.AccessDenied, psutil.NoSuchProcess):
                    # Access denied to process IO (no root account)
                    # NoSuchProcess (process die between first and second grab)
                    # Put 0 in all values (for sort) and io_tag = 0 (for display)
                    procstat['io_counters'] = [0, 0] + [0, 0]
                    io_tag = 0
                else:
                    # For IO rate computation
                    # Append saved IO r/w bytes
                    try:
                        procstat['io_counters'] = io_new + self.io_old[procstat['pid']]
                    except KeyError:
                        procstat['io_counters'] = io_new + [0, 0]
                    # then save the IO r/w bytes
                    self.io_old[procstat['pid']] = io_new
                    io_tag = 1

                # Append the IO tag (for display)
                procstat['io_counters'] += [io_tag]

        if standard_stats:
            procstat['standard_stats'] = True 

            # Process username (cached with internal cache)
            try:
                self.username_cache[procstat['pid']]
            except KeyError:
                try:
                    self.username_cache[procstat['pid']] = proc.username()
                except psutil.NoSuchProcess:
                        self.username_cache[procstat['pid']] = "?"
                except (KeyError, psutil.AccessDenied):
                    try:
                        self.username_cache[procstat['pid']] = proc.uids().real
                    except (KeyError, AttributeError, psutil.AccessDenied):
                        self.username_cache[procstat['pid']] = "?"
            procstat['username'] = self.username_cache[procstat['pid']]

            # Process status, nice, memory_info and cpu_times
            try:
                procstat.update(proc.as_dict(attrs=['status', 'nice', 'memory_info', 'cpu_times']))
            except psutil.NoSuchProcess:
                pass
            else:
                procstat['status'] = str(procstat['status'])[:1].upper()

        if extended_stats and not self.disable_extended_tag:
            procstat['extended_stats'] = True 

            # CPU affinity (Windows and Linux only)
            try:
                procstat.update(proc.as_dict(attrs=['cpu_affinity']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['cpu_affinity'] = None
            # Memory extended
            try:
                procstat.update(proc.as_dict(attrs=['memory_info_ex']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['memory_info_ex'] = None
            # Number of context switch
            try:
                procstat.update(proc.as_dict(attrs=['num_ctx_switches']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['num_ctx_switches'] = None
            # Number of file descriptors (Unix only)
            try:
                procstat.update(proc.as_dict(attrs=['num_fds']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['num_fds'] = None
            # Threads number
            try:
                procstat.update(proc.as_dict(attrs=['num_threads']))
            except psutil.NoSuchProcess:
                pass
            except AttributeError:
                procstat['num_threads'] = None

            # Number of handles (Windows only)
            if is_windows:
                try:
                    procstat.update(proc.as_dict(attrs=['num_handles']))
                except psutil.NoSuchProcess:
                    pass
            else:
                procstat['num_handles'] = None

            # SWAP memory (Only on Linux based OS)
            # http://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
            if is_linux:
                try:
                    procstat['memory_swap'] = sum([v.swap for v in proc.memory_maps()])
                except psutil.NoSuchProcess:
                    pass
                except psutil.AccessDenied:
                    procstat['memory_swap'] = None

            # Process network connections (TCP and UDP)
            try:
                procstat['tcp'] = len(proc.connections(kind="tcp"))
                procstat['udp'] = len(proc.connections(kind="udp"))
            except:
                procstat['tcp'] = None
                procstat['udp'] = None

            # IO Nice
            # http://pythonhosted.org/psutil/#psutil.Process.ionice
            if is_linux or is_windows:
                try:
                    procstat.update(proc.as_dict(attrs=['ionice']))
                except psutil.NoSuchProcess:
                    pass
            else:
                procstat['ionice'] = None

            #logger.debug(procstat)

        return procstat

    def update(self):
        """Update the processes stats.

        Refresh ``self.processlist`` and ``self.processcount`` from psutil.
        When ``self.get_max_processes()`` is not None, only the top N
        processes (sorted by the current sort key) get standard/extended
        stats; otherwise every process gets mandatory + standard stats.
        """
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update (used for IO bitrate computation)
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only mandatory stats (sort keys)
        processdict = {}
        for proc in psutil.process_iter():
            # If self.get_max_processes() is None: only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            s = self.__get_process_stats(proc,
                                         mandatory_stats=True,
                                         standard_stats=self.get_max_processes() is None)
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
                continue
            # OK, add the process to the list
            processdict[proc] = s
            # Ignore the 'idle' process on Windows and *BSD
            # Ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            # NOTE(review): the process is still stored in processdict above;
            # only the counters below are skipped — confirm this is intended.
            if (is_bsd and processdict[proc]['name'] == 'idle' or
                is_windows and processdict[proc]['name'] == 'System Idle Process' or
                is_mac and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                self.processcount[str(proc.status())] = 1
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            # BUGFIX: was a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                # Thread count is best-effort (process may have vanished)
                pass

        if self.get_max_processes() is not None:
            # Sort the internal dict and cut the top N (returns a list of tuples)
            # tuple = key (proc), dict (returned by __get_process_stats)
            processiter = sorted(processdict.items(), key=lambda x: x[1][self.getsortkey()], reverse=True)
            first = True
            for i in processiter[0:self.get_max_processes()]:
                # Already existing mandatory stats
                procstat = i[1]
                # Update with standard stats
                # and extended stats but only for the TOP (first) process
                s = self.__get_process_stats(i[0],
                                             mandatory_stats=False,
                                             standard_stats=True,
                                             extended_stats=first)
                if s is None:
                    continue
                procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False
        else:
            # Get all the processes
            for i in processdict.items():
                # Already existing mandatory and standard stats
                procstat = i[1]
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)

        # Clean internal caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Return the process counters dict (total/running/sleeping/thread)."""
        counters = self.processcount
        return counters

    def getlist(self, sortedby=None):
        """Return the process list.

        The ``sortedby`` argument is accepted for API compatibility but the
        list is returned as-is (see getsortlist for sorting).
        """
        return self.processlist

    def getsortkey(self):
        """Return the effective sort key: the manual key wins over the automatic one."""
        manual = self.getmanualsortkey()
        return manual if manual is not None else self.getautosortkey()

    def getmanualsortkey(self):
        """Return the manual sort key, or None when automatic sort is active."""
        return self.processmanualsort

    def getautosortkey(self):
        """Return the sort key used when automatic sorting is active."""
        return self.processautosort

    def setmanualsortkey(self, sortedby):
        """Set the manual sort key and return it."""
        self.processmanualsort = sortedby
        return sortedby

    def setautosortkey(self, sortedby):
        """Set the automatic sort key and return it."""
        self.processautosort = sortedby
        return sortedby

    def resetsort(self):
        """Restore the default sorting mode: automatic, by CPU percent."""
        self.setmanualsortkey(None)
        self.setautosortkey('cpu_percent')

    def getsortlist(self, sortedby=None):
        """Sort the process list in place by ``sortedby`` and return it.

        'name' sorts ascending, every other key descending. The
        'io_counters' key sorts by IO rate (delta read + delta write) and
        falls back to a CPU-percent sort if the counters are unusable.
        A None key returns the list untouched.
        """
        if sortedby is None:
            # Nothing to sort on
            return self.processlist

        reverse_order = sortedby != 'name'

        if sortedby == 'io_counters':
            # Specific case for io_counters: sort by IO rate, i.e.
            # (read - read_old) + (write - write_old)
            def io_rate(process):
                counters = process[sortedby]
                return (counters[0] - counters[2]) + (counters[1] - counters[3])

            try:
                self.processlist = sorted(self.processlist,
                                          key=io_rate,
                                          reverse=reverse_order)
            except Exception:
                # IO counters not available: fall back to a CPU sort
                self.processlist = sorted(self.processlist,
                                          key=lambda process: process['cpu_percent'],
                                          reverse=reverse_order)
        else:
            # Generic sort on the requested key
            self.processlist = sorted(self.processlist,
                                      key=lambda process: process[sortedby],
                                      reverse=reverse_order)

        return self.processlist
# --- Esempio n. 27 (Example 27) ---
class GlancesProcesses(object):

    """Get processes stats using the psutil library."""

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes.

        cache_timeout is the lifetime (in seconds) of the username and
        cmdline internal caches.
        """
        # Add internal caches because psutil does not cache all the stats
        # See: https://code.google.com/p/psutil/issues/detail?id=462
        self.username_cache = {}
        self.cmdline_cache = {}

        # The internal caches will be cleaned each 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Init the io dict
        # key = pid
        # value = [ read_bytes_old, write_bytes_old ]
        self.io_old = {}

        # Whether or not to enable the process tree view
        self._enable_tree = False
        self.process_tree = None

        # Init stats
        self.auto_sort = True
        self._sort_key = 'cpu_percent'
        self.allprocesslist = []
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
        # Default is to enable the processes stats
        self.disable_tag = False

        # Extended stats for the top process are enabled by default
        self.disable_extended_tag = False

        # Maximum number of processes showed in the UI (None if no limit)
        self._max_processes = None

        # Process filter is a regular expression
        self._process_filter = None
        self._process_filter_re = None

        # Whether or not to hide kernel threads
        self.no_kernel_threads = False

    def enable(self):
        """Enable process stats and trigger an immediate update."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Enable extended process stats and trigger an immediate update."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    @property
    def max_processes(self):
        """Get the maximum number of processes showed in the UI."""
        return self._max_processes

    @max_processes.setter
    def max_processes(self, value):
        """Set the maximum number of processes showed in the UI."""
        self._max_processes = value

    @property
    def process_filter(self):
        """Get the process filter (regular expression string or None)."""
        return self._process_filter

    @process_filter.setter
    def process_filter(self, value):
        """Set the process filter and pre-compile its regular expression."""
        logger.info("Set process filter to {0}".format(value))
        self._process_filter = value
        if value is not None:
            try:
                self._process_filter_re = re.compile(value)
                logger.debug("Process filter regex compilation OK: {0}".format(self.process_filter))
            except Exception:
                logger.error("Cannot compile process filter regex: {0}".format(value))
                self._process_filter_re = None
        else:
            self._process_filter_re = None

    @property
    def process_filter_re(self):
        """Get the compiled process filter regular expression (or None)."""
        return self._process_filter_re

    def is_filtered(self, value):
        """Return True if the value should be filtered out."""
        if self.process_filter is None:
            # No filter => not filtered
            return False
        # Filtered out when the regex does NOT match
        return self.process_filter_re.match(value) is None

    def disable_kernel_threads(self):
        """Ignore kernel threads in process list."""
        self.no_kernel_threads = True

    def enable_tree(self):
        """Enable process tree."""
        self._enable_tree = True

    def is_tree_enabled(self):
        """Return True if process tree is enabled, False instead."""
        return self._enable_tree

    @property
    def sort_reverse(self):
        """Return True to sort processes in reverse 'key' order, False instead."""
        if self.sort_key == 'name' or self.sort_key == 'username':
            return False

        return True

    def __get_mandatory_stats(self, proc, procstat):
        """
        Get mandatory_stats: needed for the sorting/filter step.

        => cpu_percent, memory_percent, io_counters, name, cmdline
        Return None if the process vanished or basic stats are missing.
        """
        procstat['mandatory_stats'] = True

        # Process CPU, MEM percent and name
        try:
            procstat.update(proc.as_dict(
                attrs=['username', 'cpu_percent', 'memory_percent',
                       'name', 'cpu_times'], ad_value=''))
        except psutil.NoSuchProcess:
            # Try/catch for issue #432
            return None
        if procstat['cpu_percent'] == '' or procstat['memory_percent'] == '':
            # Do not display process if we cannot get the basic
            # cpu_percent or memory_percent stats
            return None

        # Process command line (cached with internal cache)
        if procstat['pid'] not in self.cmdline_cache:
            # Patch for issue #391
            try:
                self.cmdline_cache[procstat['pid']] = ' '.join(proc.cmdline())
            except (AttributeError, UnicodeDecodeError, psutil.AccessDenied, psutil.NoSuchProcess):
                self.cmdline_cache[procstat['pid']] = ""
        procstat['cmdline'] = self.cmdline_cache[procstat['pid']]

        # Process IO
        # procstat['io_counters'] is a list:
        # [read_bytes, write_bytes, read_bytes_old, write_bytes_old, io_tag]
        # If io_tag = 0 > Access denied (display "?")
        # If io_tag = 1 > No access denied (display the IO rate)
        # Note: disk IO stat not available on Mac OS
        if not is_mac:
            try:
                # Get the process IO counters
                proc_io = proc.io_counters()
                io_new = [proc_io.read_bytes, proc_io.write_bytes]
            except (psutil.AccessDenied, psutil.NoSuchProcess, NotImplementedError):
                # Access denied to process IO (no root account)
                # NoSuchProcess (process died between first and second grab)
                # Put 0 in all values (for sort) and io_tag = 0 (for display)
                procstat['io_counters'] = [0, 0] + [0, 0]
                io_tag = 0
            else:
                # For IO rate computation
                # Append saved IO r/w bytes
                try:
                    procstat['io_counters'] = io_new + \
                        self.io_old[procstat['pid']]
                except KeyError:
                    procstat['io_counters'] = io_new + [0, 0]
                # then save the IO r/w bytes
                self.io_old[procstat['pid']] = io_new
                io_tag = 1

            # Append the IO tag (for display)
            procstat['io_counters'] += [io_tag]

        return procstat

    def __get_standard_stats(self, proc, procstat):
        """
        Get standard_stats: for all the displayed processes.

        => username, status, memory_info, cpu_times
        """
        procstat['standard_stats'] = True

        # Process username (cached with internal cache)
        if procstat['pid'] not in self.username_cache:
            try:
                self.username_cache[procstat['pid']] = proc.username()
            except psutil.NoSuchProcess:
                self.username_cache[procstat['pid']] = "?"
            except (KeyError, psutil.AccessDenied):
                # Fall back to the real UID when the name is unavailable
                try:
                    self.username_cache[procstat['pid']] = proc.uids().real
                except (KeyError, AttributeError, psutil.AccessDenied):
                    self.username_cache[procstat['pid']] = "?"
        procstat['username'] = self.username_cache[procstat['pid']]

        # Process status, nice, memory_info and cpu_times
        try:
            procstat.update(
                proc.as_dict(attrs=['status', 'nice', 'memory_info', 'cpu_times']))
        except psutil.NoSuchProcess:
            pass
        else:
            # Keep only the first letter of the status, uppercased (R/S/...)
            procstat['status'] = str(procstat['status'])[:1].upper()

        return procstat

    def __get_extended_stats(self, proc, procstat):
        """
        Get extended_stats: only for top processes (see issue #403).

        => connections (UDP/TCP), memory_swap...
        """
        procstat['extended_stats'] = True

        # CPU affinity (Windows and Linux only)
        try:
            procstat.update(proc.as_dict(attrs=['cpu_affinity']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['cpu_affinity'] = None
        # Memory extended
        try:
            procstat.update(proc.as_dict(attrs=['memory_info_ex']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['memory_info_ex'] = None
        # Number of context switches
        try:
            procstat.update(proc.as_dict(attrs=['num_ctx_switches']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_ctx_switches'] = None
        # Number of file descriptors (Unix only)
        try:
            procstat.update(proc.as_dict(attrs=['num_fds']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_fds'] = None
        # Threads number
        try:
            procstat.update(proc.as_dict(attrs=['num_threads']))
        except psutil.NoSuchProcess:
            pass
        except AttributeError:
            procstat['num_threads'] = None

        # Number of handles (Windows only)
        if is_windows:
            try:
                procstat.update(proc.as_dict(attrs=['num_handles']))
            except psutil.NoSuchProcess:
                pass
        else:
            procstat['num_handles'] = None

        # SWAP memory (Only on Linux based OS)
        # http://www.cyberciti.biz/faq/linux-which-process-is-using-swap/
        if is_linux:
            try:
                procstat['memory_swap'] = sum(
                    [v.swap for v in proc.memory_maps()])
            except psutil.NoSuchProcess:
                pass
            except psutil.AccessDenied:
                procstat['memory_swap'] = None
            except Exception:
                # Add a dirty except to handle the PsUtil issue #413
                procstat['memory_swap'] = None

        # Process network connections (TCP and UDP)
        try:
            procstat['tcp'] = len(proc.connections(kind="tcp"))
            procstat['udp'] = len(proc.connections(kind="udp"))
        except Exception:
            procstat['tcp'] = None
            procstat['udp'] = None

        # IO Nice
        # http://pythonhosted.org/psutil/#psutil.Process.ionice
        if is_linux or is_windows:
            try:
                procstat.update(proc.as_dict(attrs=['ionice']))
            except psutil.NoSuchProcess:
                pass
        else:
            procstat['ionice'] = None

        return procstat

    def __get_process_stats(self, proc,
                            mandatory_stats=True,
                            standard_stats=True,
                            extended_stats=False):
        """Get stats of a running process (see the __get_*_stats helpers).

        Return None if the process vanished during the mandatory step.
        """
        # Process ID (always)
        procstat = proc.as_dict(attrs=['pid'])

        if mandatory_stats:
            procstat = self.__get_mandatory_stats(proc, procstat)

        if procstat is not None and standard_stats:
            procstat = self.__get_standard_stats(proc, procstat)

        if procstat is not None and extended_stats and not self.disable_extended_tag:
            procstat = self.__get_extended_stats(proc, procstat)

        return procstat

    def update(self):
        """Update the processes stats (lists, counters and optional tree)."""
        # Reset the stats
        self.processlist = []
        self.processcount = {'total': 0, 'running': 0, 'sleeping': 0, 'thread': 0}

        # Do not process if disable tag is set
        if self.disable_tag:
            return

        # Get the time since last update (used for IO bitrate computation)
        time_since_update = getTimeSinceLastUpdate('process_disk')

        # Build an internal dict with only mandatory stats (sort keys)
        processdict = {}
        for proc in psutil.process_iter():
            # Ignore kernel threads if needed
            if self.no_kernel_threads and not is_windows and is_kernel_thread(proc):
                continue

            # If self.max_processes is None: only retrieve mandatory stats
            # Else: retrieve mandatory and standard stats
            # BUGFIX: was self.get_max_processes(), which does not exist on
            # this class (replaced by the max_processes property).
            try:
                s = self.__get_process_stats(proc,
                                             mandatory_stats=True,
                                             standard_stats=self.max_processes is None)
            except psutil.NoSuchProcess:
                # Can happen if the process no longer exists
                continue
            # Continue to the next process if it has to be filtered
            if s is None or (self.is_filtered(s['cmdline']) and self.is_filtered(s['name'])):
                continue
            # OK, add the process to the list
            processdict[proc] = s
            # Ignore the 'idle' process on Windows and *BSD
            # Ignore the 'kernel_task' process on OS X
            # waiting for upstream patch from psutil
            if (is_bsd and processdict[proc]['name'] == 'idle' or
                    is_windows and processdict[proc]['name'] == 'System Idle Process' or
                    is_mac and processdict[proc]['name'] == 'kernel_task'):
                continue
            # Update processcount (global statistics)
            try:
                self.processcount[str(proc.status())] += 1
            except KeyError:
                # Key did not exist, create it
                try:
                    self.processcount[str(proc.status())] = 1
                except psutil.NoSuchProcess:
                    pass
            except psutil.NoSuchProcess:
                pass
            else:
                self.processcount['total'] += 1
            # Update thread number (global statistics)
            try:
                self.processcount['thread'] += proc.num_threads()
            except Exception:
                # Thread count is best-effort (process may have vanished)
                pass

        if self._enable_tree:
            self.process_tree = ProcessTreeNode.build_tree(processdict,
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads)

            for i, node in enumerate(self.process_tree):
                # Only retrieve stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break

                # Add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)

                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update

        else:
            # Process optimization
            # Only retrieve stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N (returns a list of tuples)
                # tuple = key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(processdict.items(),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {0}: {1}".format(self.sort_key, e))
                    # BUGFIX: dict views are not subscriptable on Python 3
                    logger.error("%s", str(list(processdict.items())[0]))
                    # Fallback to all processes (issue #423)
                    processloop = processdict.items()
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    first = True
            else:
                # Get all processes stats
                processloop = processdict.items()
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats but only for the TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all processes list used by the monitored list
        # BUGFIX: materialize the values (a view on Python 3) into a list
        self.allprocesslist = list(processdict.values())

        # Clean internal caches if timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getalllist(self):
        """Get the allprocesslist."""
        return self.allprocesslist

    def getlist(self, sortedby=None):
        """Get the processlist."""
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    @property
    def sort_key(self):
        """Get the current sort key."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, key):
        """Set the current sort key."""
        self._sort_key = key
# --- Esempio n. 28 (Example 28) ---
class GlancesInstance(object):

    """All the methods of this class are published as XML-RPC methods."""

    def __init__(self, cached_time=1, config=None):
        """Create the server-side stats holder and run a first update.

        cached_time is the minimum interval (in seconds) between two stats
        updates: XML-RPC calls made before it elapses get the cached data.
        """
        self.stats = GlancesStatsServer(config)
        self.stats.update()
        # A Timer(0) is already expired, so the first client request after
        # this initial update triggers a refresh.
        self.timer = Timer(0)
        self.cached_time = cached_time

    def __update__(self):
        """Refresh the stats, at most once per 'cached_time' seconds."""
        if not self.timer.finished():
            return
        self.stats.update()
        self.timer = Timer(self.cached_time)

    def init(self):
        """Return the Glances version string."""
        return version

    def getAll(self):
        """Refresh (if needed) and return every stat, JSON-encoded."""
        self.__update__()
        return json.dumps(self.stats.getAll())

    def getAllPlugins(self):
        """Return the list of loaded plugins, JSON-encoded."""
        return json.dumps(self.stats.getAllPlugins())

    def getAllLimits(self):
        """Return every plugin's limits, JSON-encoded."""
        return json.dumps(self.stats.getAllLimitsAsDict())

    def getAllViews(self):
        """Return every plugin's views, JSON-encoded."""
        return json.dumps(self.stats.getAllViewsAsDict())

    def getAllMonitored(self):
        """Return the monitored processes list, JSON-encoded."""
        # Extracted from the full stats dump (no dedicated getter on stats)
        return json.dumps(self.stats.getAll()['monitor'])

    def __getattr__(self, item):
        """Dynamically expose the plugins' get*() methods over XML-RPC.

        Any attribute starting with 'get' is forwarded to self.stats after a
        (possibly cached) update; anything else raises AttributeError.
        """
        if not item.startswith('get'):
            # Default behavior
            raise AttributeError(item)
        try:
            # Update the stats then forward the call to the stats object
            self.__update__()
            return getattr(self.stats, item)
        except Exception:
            # The method is not found for the plugin
            raise AttributeError(item)
# --- Esempio n. 29 (Example 29) ---
 def __update__(self):
     """Refresh the stats, at most once per 'cached_time' seconds."""
     # NOTE(review): stray duplicate of GlancesInstance.__update__ pasted as
     # a free-standing snippet; the one-space indent reflects the missing
     # class context — confirm whether this fragment should be removed.
     # Never update more than 1 time per cached_time
     if self.timer.finished():
         self.stats.update()
         self.timer = Timer(self.cached_time)
# --- Esempio n. 30 (Example 30) ---
class GlancesInstance(object):
    """XML-RPC server facade: all methods of this class are published as
    XML-RPC methods."""

    def __init__(self, cached_time=1, config=None):
        # Build the stats aggregator and fill it once so the very first
        # client call already has data to serve
        self.stats = GlancesStatsServer(config)
        self.stats.update()

        # cached_time is the minimum interval between two real stats
        # refreshes; XML-RPC calls made sooner get the cached values.
        # A zero-length timer means "cache already expired".
        self.timer = Timer(0)
        self.cached_time = cached_time

    def __update__(self):
        # Refresh the stats at most once per cached_time seconds
        if not self.timer.finished():
            return
        self.stats.update()
        # Re-arm the cache countdown
        self.timer = Timer(self.cached_time)

    def init(self):
        # Handshake helper: return the Glances version string
        return version

    def getAll(self):
        # Refresh (cache permitting) then return every stat as JSON
        self.__update__()
        return json.dumps(self.stats.getAll())

    def getAllPlugins(self):
        # Return the list of loaded plugin names as JSON
        return json.dumps(self.stats.getAllPlugins())

    def getAllLimits(self):
        # Return every plugin's limits as JSON
        return json.dumps(self.stats.getAllLimitsAsDict())

    def getAllViews(self):
        # Return every plugin's views as JSON
        return json.dumps(self.stats.getAllViewsAsDict())

    def getAllMonitored(self):
        # Return the monitored-processes list as JSON
        # (extracted from the full stats dict; no dedicated monitor API)
        return json.dumps(self.stats.getAll()['monitor'])

    def __getattr__(self, item):
        """Fallback lookup: dynamically expose the get<Stat>() API methods.

        Attributes named get* are forwarded to the stats object after a
        (cached) refresh; anything else is a genuine attribute miss.
        """
        if not item.startswith('get'):
            # Not part of the dynamic API: behave like a normal miss
            raise AttributeError(item)
        try:
            # Refresh the stats (respects the cache timer)
            self.__update__()
            return getattr(self.stats, item)
        except Exception:
            # The stats object has no such method for this plugin
            raise AttributeError(item)