Example #1
    def r_engine_status():
        t = psutil.cpu_times()  # cumulative CPU seconds since boot
        cpu_used = int(round((t.user * 100) + (t.system * 100), 0))
        cpu_free = int(round(t.idle * 100, 0))

        s = {
            "cpu": {
                "used": cpu_used,
                "free": cpu_free
            },
            "memory": {
                "physical": {
                    "used": psutil.phymem_usage().used,
                    "free": psutil.phymem_usage().free
                },
                "virtual": {
                    "used": psutil.virtmem_usage().used,
                    "free": psutil.virtmem_usage().free
                }
            },
            "disk": {
                "used": psutil.disk_usage('/').used,
                "free": psutil.disk_usage('/').free
            }
        }

        r = Response("success", "status", s).get()
        return r
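Note that cpu_times() returns cumulative seconds since boot, not fractions of an interval, so multiplying a single sample by 100 does not produce a true utilization percentage. A minimal alternative sketch using psutil.cpu_percent, which samples over an interval for you (not part of the original example):

import psutil

# Percentage of CPU in use over a 0.5-second sampling window.
cpu_used = psutil.cpu_percent(interval=0.5)
cpu_free = 100.0 - cpu_used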
Example #2
def get_cpu_node():
    cpu_count = RunnableNode('count', method=lambda: ([len(ps.cpu_percent(percpu=True))], 'cores'))
    cpu_percent = LazyNode('percent', method=lambda: (ps.cpu_percent(interval=0.5, percpu=True), '%'))
    cpu_user = RunnableNode('user', method=lambda: ([x.user for x in ps.cpu_times(percpu=True)], 'ms'))
    cpu_system = RunnableNode('system', method=lambda: ([x.system for x in ps.cpu_times(percpu=True)], 'ms'))
    cpu_idle = RunnableNode('idle', method=lambda: ([x.idle for x in ps.cpu_times(percpu=True)], 'ms'))
    return ParentNode('cpu', children=[cpu_count, cpu_system, cpu_percent, cpu_user, cpu_idle])
Example #3
File: lab4.py Project: emuu/is105
def psutils_use():
    cpu = psutil.cpu_times()
    navn = cpu._fields
    verdi = tuple(psutil.cpu_times())
    liste = zip(navn, verdi)
    for navn, verdi in liste:
        print navn, " = ", verdi
Example #4
 def test_sys_cpu_times2(self):
     t1 = sum(psutil.cpu_times())
     time.sleep(0.1)
     t2 = sum(psutil.cpu_times())
     difference = t2 - t1
     if not difference >= 0.05:
         self.fail("difference %s" % difference)
Example #5
File: test_system.py Project: mabuaita/ops
 def test_cpu_times_time_increases(self):
     # Make sure time increases between calls.
     t1 = sum(psutil.cpu_times())
     time.sleep(0.1)
     t2 = sum(psutil.cpu_times())
     difference = t2 - t1
     if not difference >= 0.05:
         self.fail("difference %s" % difference)
Example #6
File: host.py Project: javacruft/nova-lxd
 def get_host_cpu_stats(self):
     return {
         'kernel': int(psutil.cpu_times()[2]),
         'idle': int(psutil.cpu_times()[3]),
         'user': int(psutil.cpu_times()[0]),
         'iowait': int(psutil.cpu_times()[4]),
         'frequency': self.host_cpu_info['hz_advertised']
     }
Example #7
File: lab4.py Project: IS105/IS-105-Lab4
def psutils_use():
    """
    Fetches lists of system information from /proc and processes them
    """
    name = psutil.cpu_times()._fields
    value = psutil.cpu_times()
    result = zip(name, value)
    for name, value in result:
        print("\t%s = %s" % (name, value))
Example #8
File: test_system.py Project: mabuaita/ops
 def test_cpu_times_comparison(self):
     # Make sure the sum of all per cpu times is almost equal to
     # base "one cpu" times.
     base = psutil.cpu_times()
     per_cpu = psutil.cpu_times(percpu=True)
     summed_values = base._make([sum(num) for num in zip(*per_cpu)])
     for field in base._fields:
         self.assertAlmostEqual(
             getattr(base, field), getattr(summed_values, field), delta=1)
Example #9
File: host.py Project: rockstar/nova-lxd
 def get_host_cpu_stats(self):
     cpuinfo = self._get_cpu_info()
     return {
         "kernel": int(psutil.cpu_times()[2]),
         "idle": int(psutil.cpu_times()[3]),
         "user": int(psutil.cpu_times()[0]),
         "iowait": int(psutil.cpu_times()[4]),
         "frequency": cpuinfo.get("cpu mhz", 0),
     }
Example #10
File: lab4.py Project: sondresallaup/is105
def psutils_use():
    """
    Fetches lists of system information from /proc and processes them
    """
    # Implement the function here
    print 'user = ' + str(psutil.cpu_times()[0])
    print 'nice = ' + str(psutil.cpu_times()[1])
    print 'system = ' + str(psutil.cpu_times()[2])
    print 'idle = ' + str(psutil.cpu_times()[3])
Example #11
File: host.py Project: dstelter/nova-lxd
 def get_host_cpu_stats(self):
     cpuinfo = self._get_cpu_info()
     return {
         'kernel': int(psutil.cpu_times()[2]),
         'idle': int(psutil.cpu_times()[3]),
         'user': int(psutil.cpu_times()[0]),
         'iowait': int(psutil.cpu_times()[4]),
         'frequency': cpuinfo.get('cpu mhz', 0)
     }
Example #12
 def test_cpu_times_time_increases(self):
     # Make sure time increases between calls.
     t1 = sum(psutil.cpu_times())
     stop_at = time.time() + 1
     while time.time() < stop_at:
         t2 = sum(psutil.cpu_times())
         if t2 > t1:
             return
     self.fail("time remained the same")
Example #13
File: monitor.py Project: hackliff/domobot
 def context(self, cpus, memory, network):
     #TODO Store the very first value as reference
     if ( cpus ):
         print 'Available CPUs:', psutil.NUM_CPUS  # removed in modern psutil; use psutil.cpu_count()
         print psutil.cpu_times()
         print psutil.cpu_percent(interval=1, percpu=True)
     if ( memory ):
         print 'Memory counters:', psutil.disk_io_counters()
     if ( network ):
         print 'Network traffic:', psutil.network_io_counters()  # renamed net_io_counters() in modern psutil
Example #14
File: lab4.py Project: huseia13/IS-105
def psutils_use():
        """
        Fetches lists of system information from /proc and processes them
        """
        # Implement the function here
        x = psutil.cpu_times()._fields
        y = psutil.cpu_times()
        r = zip(x, y)
        for name, value in r:
                print("\t%s = %s" % (name, value))
Example #15
File: webserver.py Project: golya/FuzzLabs
    def get_cpu_stats(self):
        t = psutil.cpu_times()  # cumulative CPU seconds since boot
        cpu_used = int(round((t.user * 100) + (t.system * 100), 0))
        cpu_free = int(round(t.idle * 100, 0))

        cpu_stat = {
            "used": cpu_used,
            "free": cpu_free
        }

        return cpu_stat
Example #16
File: ps.py Project: slient2010/ManageGame
def cpu_times(per_cpu=False, *args, **kwarg):
    """
    def cpu_times(per_cpu=False) -> Return the cumulative time the CPU has spent in each state, e.g. user, system, idle, nice, iowait, irq, softirq.
    @param per_cpu bool: if True return a list of per-CPU dicts, otherwise aggregate all CPUs into one dict
    @return dict (or list of dicts when per_cpu=True)
    """
    if per_cpu:
        result = [dict(times._asdict()) for times in psutil.cpu_times(True)]
    else:
        result = dict(psutil.cpu_times(per_cpu)._asdict())
    return result
Example #17
 def test_sys_per_cpu_times(self):
     for times in psutil.cpu_times(percpu=True):
         total = 0
         sum(times)
         for cp_time in times:
             self.assertIsInstance(cp_time, float)
             self.assertGreaterEqual(cp_time, 0.0)
             total += cp_time
         self.assertEqual(total, sum(times))
         str(times)
     self.assertEqual(len(psutil.cpu_times(percpu=True)[0]),
                      len(psutil.cpu_times(percpu=False)))
Example #18
 def collect(self):
     """
     Collector cpu stats
     """
     if os.access(self.PROC, os.R_OK):
 
         results = {}
         # Open file
         file = open(self.PROC)
         # Build regex (raw string so the backslash escapes reach the regex engine untouched)
         exp = r'^(cpu[0-9]*)\s+(?P<user>\d+)\s+(?P<nice>\d+)\s+(?P<system>\d+)\s+(?P<idle>\d+)\s+(?P<iowait>\d+).*$'
         reg = re.compile(exp)
         for line in file:
             match = reg.match(line)
 
             if match:
                 cpu = match.group(1)
                 if cpu == 'cpu':
                     cpu = 'total'
                 results[cpu] = {}
                 results[cpu] = match.groupdict()
         # Close File
         file.close()
 
         for cpu in results.keys():
             stats = results[cpu]
             for s in stats.keys():
                 # Get Metric Name
                 metric_name = '.'.join([cpu, s])
                 # Publish Metric Derivative
                 self.publish(metric_name, self.derivative(metric_name, long(stats[s]), self.MAX_VALUES[s]))
         return True
     
     elif psutil:
         cpu_time = psutil.cpu_times(True)
         total_time = psutil.cpu_times()
         for i in range(0, len(cpu_time)):
             metric_name = 'cpu'+str(i)
             self.publish(metric_name+'.user',   self.derivative(metric_name+'.user',   cpu_time[i].user,   self.MAX_VALUES['user']))
             self.publish(metric_name+'.nice',   self.derivative(metric_name+'.nice',   cpu_time[i].nice,   self.MAX_VALUES['nice']))
             self.publish(metric_name+'.system', self.derivative(metric_name+'.system', cpu_time[i].system, self.MAX_VALUES['system']))
             self.publish(metric_name+'.idle',   self.derivative(metric_name+'.idle',   cpu_time[i].idle,   self.MAX_VALUES['idle']))
         
         metric_name = 'total'
         self.publish(metric_name+'.user',   self.derivative(metric_name+'.user',   total_time.user,   self.MAX_VALUES['user']))
         self.publish(metric_name+'.nice',   self.derivative(metric_name+'.nice',   total_time.nice,   self.MAX_VALUES['nice']))
         self.publish(metric_name+'.system', self.derivative(metric_name+'.system', total_time.system, self.MAX_VALUES['system']))
         self.publish(metric_name+'.idle',   self.derivative(metric_name+'.idle',   total_time.idle,   self.MAX_VALUES['idle']))
     
         return True
     
     return None
Example #19
 def cpu_times(self):
     # Take a single snapshot; note that some fields (iowait, irq, softirq,
     # steal, guest, guest_nice) only exist on Linux.
     t = psutil.cpu_times()
     ret = {}
     ret['cpu_user'] = t.user
     ret['cpu_nice'] = t.nice
     ret['cpu_system'] = t.system
     ret['cpu_idle'] = t.idle
     ret['cpu_iowait'] = t.iowait
     ret['cpu_irq'] = t.irq
     ret['cpu_softirq'] = t.softirq
     ret['cpu_steal'] = t.steal
     ret['cpu_guest'] = t.guest
     ret['cpu_guest_nice'] = t.guest_nice
     return ret
Example #20
def get_cpu_stats():
    '''return cpu stats'''
    cputime = psutil.cpu_times()

    cpu = _calculate_cpu_stats(cputime)

    percpu = [_calculate_cpu_stats(val) for val in
                psutil.cpu_times(percpu=True)]

    return {
        "global": cpu,
        "cpu": percpu
    }
Example #21
 def test_sys_per_cpu_times_2(self):
     tot1 = psutil.cpu_times(percpu=True)
     stop_at = time.time() + 0.1
     while True:
         if time.time() >= stop_at:
             break
     tot2 = psutil.cpu_times(percpu=True)
     for t1, t2 in zip(tot1, tot2):
         t1, t2 = sum(t1), sum(t2)
         difference = t2 - t1
         if difference >= 0.05:
             return
     self.fail()
Example #22
def iowait():
    """
    Returns the amount of time spent by the cpu waiting
    on io

    .. note::
        on platforms other than linux this will return None
    """
    try:
        cpu_times = psutil.cpu_times()
        if hasattr(cpu_times, "iowait"):
            return cpu_times.iowait
    except AttributeError:  # pragma: no cover
        return None
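A slightly more compact version of the same guard, using getattr with a default instead of hasattr (an alternative sketch, not the original project's code):

import psutil

def iowait():
    # The iowait field only exists in the cpu_times tuple on Linux; fall back to None elsewhere.
    return getattr(psutil.cpu_times(), "iowait", None)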
Example #23
def cputimes(percpu=False):
    result = {}
    # aggregate times across all CPUs
    infocpu = psutil.cpu_times(percpu=False)
    result['allcpu'] = __dictdata(infocpu)
    if percpu:
        infocpu = psutil.cpu_times(percpu=True)
        nbcpu = len(infocpu)
        result['nbcpu'] = nbcpu
        for cpu_nb in range(nbcpu):
            result['cpu%s' % cpu_nb] = __dictdata(infocpu[cpu_nb])
    return result
Example #24
File: ps.py Project: atoponce/salt
def cpu_times(per_cpu=False):
    '''
    Return the cumulative time the CPU has spent in each state,
    e.g. user, system, idle, nice, iowait, irq, softirq.
    per_cpu = if True return a list of dicts, one per CPU,
              otherwise aggregate all CPUs into one dict
    '''
    if per_cpu:
        result = []
        for cpu_times in psutil.cpu_times(True):
            result.append(dict(cpu_times._asdict()))
    else:
        result = dict(psutil.cpu_times(per_cpu)._asdict())
    return result
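As a salt execution module, this would be invoked from the CLI along these lines (hypothetical minion target):

salt '*' ps.cpu_times per_cpu=True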
Example #25
File: graph.py Project: ramnes/qtile
    def _getvalues(self):

        if isinstance(self.core, int):
            if self.core > psutil.cpu_count() - 1:
                raise ValueError("No such core: {}".format(self.core))
            cpu = psutil.cpu_times(percpu=True)[self.core]
        else:
            cpu = psutil.cpu_times()

        user = cpu.user * 100
        nice = cpu.nice * 100
        sys = cpu.system * 100
        idle = cpu.idle * 100

        return (int(user), int(nice), int(sys), int(idle))
Example #26
File: test_system.py Project: mabuaita/ops
 def test_per_cpu_times_2(self):
     # Simulate some work load then make sure times have increased
     # between calls.
     tot1 = psutil.cpu_times(percpu=True)
     stop_at = time.time() + 0.1
     while True:
         if time.time() >= stop_at:
             break
     tot2 = psutil.cpu_times(percpu=True)
     for t1, t2 in zip(tot1, tot2):
         t1, t2 = sum(t1), sum(t2)
         difference = t2 - t1
         if difference >= 0.05:
             return
     self.fail()
Example #27
 def __lookup(self):
     mycputimes = psutil.cpu_times(percpu=False)
     if os.name == "nt":
         # Windows reports no nice/iowait/irq/softirq fields, so pad with zeros
         tmp = [mycputimes.user, 0.0, mycputimes.system, mycputimes.idle, 0.0, 0.0, 0.0]
     else:
         tmp = [mycputimes.user, mycputimes.nice, mycputimes.system, mycputimes.idle, mycputimes.iowait, mycputimes.irq, mycputimes.softirq]
     return tmp
Example #28
def show_system_resource(hide=False):
    """
    Print the current system resources (suppressed when hide=True)
    """
    if not hide:
        print("System info")
        print("CPU info\n", psutil.cpu_times())
        print("Memory info\n", psutil.virtual_memory(), "\n")


    logger.debug("\nSystem info\nCPU info\n{}\nMemory info\n{}\n".format(
        psutil.cpu_times(), psutil.virtual_memory()))
Example #29
    def _get_cpu_states(self):
        # This will get CPU states as a percentage of time,
        # used to help calculate percentage times when the cpu_times_percent function
        # isn't available in psutil due to the age of the version (affects RHEL 6 and older)
        # based on https://github.com/giampaolo/psutil/blob/master/psutil/__init__.py#L1542-1609
        # since the math isn't too massive computationally, we'll use this universally for now
        # if this is rebased into RHEL 6 we may revisit it then - jduncan

        try:
            t2 = psutil.cpu_times()._asdict()
            all_delta = sum(t2.values()) - sum(self.cpu_buffers.values())
            for field in self.cpu_buffers.keys():
                field_delta = t2[field] - self.cpu_buffers[field]
                try:
                    field_perc = (100 * field_delta) / all_delta
                except ZeroDivisionError:
                    field_perc = 0.0
                field_perc = round(field_perc, 2)

                # now we add the rounded percentage data to the New Relic metrics for uploading
                title = "Component/CPU/State Time/%s[percent]" % field
                self.metric_data[title] =  field_perc

                # and finally set the buffer to the current values so the next time the math will be right
                self.cpu_buffers[field] = t2[field]
            
        except Exception as e:
            self.logger.exception(e)
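On psutil versions that do ship it, psutil.cpu_times_percent performs this same delta-and-normalize computation internally; a minimal sketch of the modern call (not part of the original plugin):

import psutil

# Percentage of time spent in each CPU state over a 1-second window.
pct = psutil.cpu_times_percent(interval=1)
print(pct.user, pct.system, pct.idle)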
Example #30
def test_cpu_times_individual(compare_cpu_times, flush):
    pslib_cputimes = P.cpu_times(1)
    psutil_cputimes = psutil.cpu_times(True)

    for i, psutil_measure in enumerate(psutil_cputimes):
        pslib_measure = pslib_cputimes[i]
        assert compare_cpu_times(pslib_measure, psutil_measure)
Example #31
import psutil
import time
import json
import urllib, urllib2
import socket
url = "http://localhost:5000/post"

while True:
    print(chr(27) + "[2J")

    cpu, disks, host_info, memory, network, processes, other = [],[],[],[],[],[],[]

    host_info.append(int(time.time()))
    host_info.append(socket.getfqdn())

    cpu.append(vars(psutil.cpu_times()))
    cpu.append(psutil.cpu_percent(percpu=True))

    memory.append(vars(psutil.virtual_memory()))
    memory.append(vars(psutil.swap_memory()))

    disks.append(vars(psutil.disk_io_counters()))

    if_addr = {}
    for k, v in psutil.net_if_addrs().iteritems():
        values = []
        for i in v:
            values.append(vars(i))
        if_addr[k] = values

    netio = {}
Example #32
 def times():
     return psutil.cpu_times()
Example #33
 def test_cpu_count_vs_cpu_times(self):
     self.assertEqual(psutil.cpu_count(),
                      len(psutil.cpu_times(percpu=True)))
Example #34
 def execute(self) -> None:
     timestamp = time.time()
     cpu_percent = ps.cpu_percent(percpu=True)
     cpu_times = ps.cpu_times()
     self.acc += [[timestamp, *cpu_percent, cpu_times[0], cpu_times[1]]]
Example #35
 def prepare_cpu_usage(self):
     cpuinfo = psutil.cpu_times()
     if hasattr(cpuinfo, 'nice'):
         return [cpuinfo.user, cpuinfo.nice, cpuinfo.system, cpuinfo.idle]
     else:
         return [cpuinfo.user, 0, cpuinfo.system, cpuinfo.idle]
Example #36
def get_cpu_info():
    cpu_info = {}  # referenced without being defined in the original snippet; presumably a module-level dict
    cpu_times = psutil.cpu_times()
    cpu_info['user'] = cpu_times.user
    cpu_info['system'] = cpu_times.system
    cpu_info['idle'] = cpu_times.idle
    cpu_info['percent'] = psutil.cpu_percent(interval=2)
Example #37
 def on_stop_recording(self, task):
     """Notification that we are done with recording"""
     self.stop_cpu_throttling()
     import psutil
     if self.cpu_start is not None:
         cpu_end = psutil.cpu_times()
         cpu_busy = (cpu_end.user - self.cpu_start.user) + \
                 (cpu_end.system - self.cpu_start.system)
         cpu_total = cpu_busy + (cpu_end.idle - self.cpu_start.idle)
         cpu_pct = cpu_busy * 100.0 / cpu_total
         task['page_data']['fullyLoadedCPUms'] = int(cpu_busy * 1000.0)
         task['page_data']['fullyLoadedCPUpct'] = cpu_pct
         self.cpu_start = None
     self.recording = False
     if self.thread is not None:
         self.thread.join()
         self.thread = None
     # record the CPU/Bandwidth/memory info
     if self.usage_queue is not None and not self.usage_queue.empty(
     ) and task is not None:
         file_path = os.path.join(task['dir'],
                                  task['prefix']) + '_progress.csv.gz'
         gzfile = gzip.open(file_path, 'wb', 7)
         if gzfile:
             gzfile.write(
                 "Offset Time (ms),Bandwidth In (bps),CPU Utilization (%),Memory\n"
             )
             while not self.usage_queue.empty():
                 snapshot = self.usage_queue.get_nowait()
                 gzfile.write('{0:d},{1:d},{2:0.2f},-1\n'.format(
                     snapshot['time'], snapshot['bw'], snapshot['cpu']))
             gzfile.close()
     if self.tcpdump is not None:
         logging.debug('Stopping tcpdump')
         if platform.system() == 'Windows':
             tcpdump = os.path.join(self.support_path, 'tcpdump.exe')
             subprocess.call([tcpdump, 'stop'])
         else:
             subprocess.call(['sudo', 'killall', 'tcpdump'])
         self.tcpdump = None
         from .os_util import kill_all
         from .os_util import wait_for_all
         kill_all('tcpdump', False)
         wait_for_all('tcpdump')
     if self.ffmpeg is not None:
         logging.debug('Stopping video capture')
         if platform.system() == 'Windows':
             os.kill(self.ffmpeg.pid, signal.CTRL_BREAK_EVENT)
         else:
             self.ffmpeg.terminate()
         self.ffmpeg.communicate()
         self.ffmpeg = None
     if platform.system() == 'Windows':
         from .os_util import kill_all
         kill_all('ffmpeg.exe', True)
     else:
         subprocess.call(['killall', '-9', 'ffmpeg'])
     # kick off the video processing (async)
     if 'video_file' in task and os.path.isfile(task['video_file']):
         video_path = os.path.join(task['dir'], task['video_subdirectory'])
         support_path = os.path.join(
             os.path.abspath(os.path.dirname(__file__)), "support")
         if task['current_step'] == 1:
             filename = '{0:d}.{1:d}.histograms.json.gz'.format(
                 task['run'], task['cached'])
         else:
             filename = '{0:d}.{1:d}.{2:d}.histograms.json.gz'.format(
                 task['run'], task['cached'], task['current_step'])
         histograms = os.path.join(task['dir'], filename)
         progress_file = os.path.join(
             task['dir'], task['prefix']) + '_visual_progress.json.gz'
         visualmetrics = os.path.join(support_path, "visualmetrics.py")
         args = [
             'python', visualmetrics, '-vvvv', '-i', task['video_file'],
             '-d', video_path, '--force', '--quality',
             '{0:d}'.format(self.job['imageQuality']), '--viewport',
             '--orange', '--maxframes', '50', '--histogram', histograms,
             '--progress', progress_file
         ]
         if not task['navigated']:
             args.append('--forceblank')
         if 'renderVideo' in self.job and self.job['renderVideo']:
             video_out = os.path.join(
                 task['dir'], task['prefix']) + '_rendered_video.mp4'
             args.extend(['--render', video_out])
         if 'fullSizeVideo' in self.job and self.job['fullSizeVideo']:
             args.append('--full')
         if 'thumbsize' in self.job:
             try:
                 thumbsize = int(self.job['thumbsize'])
                 if thumbsize > 0 and thumbsize <= 2000:
                     args.extend(['--thumbsize', str(thumbsize)])
             except Exception:
                 pass
         logging.debug(' '.join(args))
         self.video_processing = subprocess.Popen(args)
     self.job['shaper'].reset()
Example #38
    def on_start_recording(self, task):
        """Notification that we are about to start an operation that needs to be recorded"""
        import psutil
        if self.device_pixel_ratio is None:
            self.device_pixel_ratio = 1.0
            try:
                ratio = self.execute_js('window.devicePixelRatio')
                if ratio is not None:
                    self.device_pixel_ratio = max(1.0, float(ratio))
            except Exception:
                pass
        if task['log_data']:
            if not self.job['shaper'].configure(self.job):
                self.task['error'] = "Error configuring traffic-shaping"
            self.cpu_start = psutil.cpu_times()
            self.recording = True
            ver = platform.uname()
            task['page_data']['osVersion'] = '{0} {1}'.format(ver[0], ver[2])
            task['page_data']['os_version'] = '{0} {1}'.format(ver[0], ver[2])
            # Spawn tcpdump
            if self.tcpdump_enabled:
                self.pcap_file = os.path.join(task['dir'],
                                              task['prefix']) + '.cap'
                if platform.system() == 'Windows':
                    tcpdump = os.path.join(self.support_path, 'tcpdump.exe')
                    args = [tcpdump, 'start', self.pcap_file]
                else:
                    interface = 'any' if self.job[
                        'interface'] is None else self.job['interface']
                    args = [
                        'sudo', 'tcpdump', '-p', '-i', interface, '-s', '0',
                        '-w', self.pcap_file
                    ]
                logging.debug(' '.join(args))
                self.tcpdump = subprocess.Popen(args)
                # give it time to actually start capturing
                time.sleep(0.5)

            # Start video capture
            if self.job['capture_display'] is not None:
                if task['navigated']:
                    self.execute_js(SET_ORANGE)
                    time.sleep(1)
                task['video_file'] = os.path.join(
                    task['dir'], task['prefix']) + '_video.mp4'
                if platform.system() == 'Darwin':
                    width = int(
                        math.ceil(task['width'] * self.device_pixel_ratio))
                    height = int(
                        math.ceil(task['height'] * self.device_pixel_ratio))
                    args = [
                        'ffmpeg', '-f', 'avfoundation', '-i',
                        str(self.job['capture_display']), '-r',
                        str(self.job['fps']), '-filter:v',
                        'crop={0:d}:{1:d}:0:0'.format(width, height),
                        '-codec:v', 'libx264rgb', '-crf', '0', '-preset',
                        'ultrafast', task['video_file']
                    ]
                else:
                    grab = 'gdigrab' if platform.system(
                    ) == 'Windows' else 'x11grab'
                    args = [
                        'ffmpeg', '-f', grab, '-video_size',
                        '{0:d}x{1:d}'.format(task['width'],
                                             task['height']), '-framerate',
                        str(self.job['fps']), '-draw_mouse', '0', '-i',
                        str(self.job['capture_display']), '-codec:v',
                        'libx264rgb', '-crf', '0', '-preset', 'ultrafast',
                        task['video_file']
                    ]
                logging.debug(' '.join(args))
                try:
                    if platform.system() == 'Windows':
                        self.ffmpeg = subprocess.Popen(args, \
                            creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
                    else:
                        self.ffmpeg = subprocess.Popen(args)
                    # Wait up to 5 seconds for something to be captured
                    end_time = monotonic.monotonic() + 5
                    started = False
                    while not started and monotonic.monotonic() < end_time:
                        if os.path.isfile(task['video_file']):
                            video_size = os.path.getsize(task['video_file'])
                            logging.debug("Video file size: %d", video_size)
                            if video_size > 10000:
                                started = True
                        if not started:
                            time.sleep(0.1)
                except Exception:
                    pass

            # start the background thread for monitoring CPU and bandwidth
            self.usage_queue = Queue.Queue()
            self.thread = threading.Thread(target=self.background_thread)
            self.thread.daemon = True
            self.thread.start()
        self.start_cpu_throttling()
Example #39
        swap()
        disk()
        loads()
        net()
    except Exception as e:
        logging.warning('Failed to fetch system status info: ' + str(e))
    finally:
        submit_status()
        Timer(1, flush_status).start()


def uptime():
    __status['uptime'] = time.time() - psutil.boot_time()


__last_ct = psutil.cpu_times()


def cpu():
    global __last_ct
    cur_ct = psutil.cpu_times()

    last_total = sum(__last_ct)
    cur_total = sum(cur_ct)

    total = cur_total - last_total
    idle = cur_ct.idle - __last_ct.idle

    percent = (total - idle) / total * 100
    __last_ct = cur_ct
    __status['cpu'] = {'percent': percent}
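Because cpu() measures the delta against the previous snapshot kept in __last_ct, calling it on a fixed interval yields a rolling utilization figure; a hedged usage sketch reusing this example's module-level names:

import time

while True:
    time.sleep(1)
    cpu()  # updates __status['cpu'] from the one-second delta
    print(__status['cpu']['percent'])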
Example #40
#-*- coding: UTF-8 -*-
import psutil
import time
from influxdb import InfluxDBClient
cdata = psutil.cpu_times().user
data = psutil.net_io_counters().bytes_sent
free_used = psutil.virtual_memory().used
free_total = psutil.virtual_memory().total
# Memory utilization (used memory / total memory)
percentage_free = (free_used * 1.0) / free_total * 100
io2 = psutil.disk_io_counters().read_count
io1 = psutil.disk_io_counters().write_count
json_body = [{
    "measurement": "datas",
    #"time": "2017-03-12T22:00:00Z",
    "fields": {
        "bytes_sent": data,
        "percentage_free": percentage_free,
        "read_count": io2,
        "write_count": io1,
        "cpu_user": cdata
    }
}]

client = InfluxDBClient('118.89.217.73', 8086, 'root', '',
                        'mydb')  # initialize (specify the database to operate on)
client.write_points(json_body)  # write the data points, creating the measurement as needed


def showDBNames(client):
    result = client.query('select * from datas')  # query the datas measurement in the database
Example #41
import psutil
print psutil.cpu_times()
print psutil.cpu_count()
print psutil.cpu_count(logical=False)
Example #42
def get_uptime():
    # Note: this reports aggregate CPU idle time, which only approximates
    # uptime on an otherwise-idle single-CPU machine.
    uptime = dict(psutil.cpu_times(percpu=False)._asdict())
    print("Uptime: {} seconds ".format(uptime['idle']))
    return uptime['idle']
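For actual wall-clock uptime, psutil.boot_time() is the more direct route; a minimal sketch (not the original example's approach):

import time
import psutil

uptime_seconds = time.time() - psutil.boot_time()
print("Uptime: {:.0f} seconds".format(uptime_seconds))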
Example #43
 def prepare_cpu_usage(self):
     t = p.cpu_times()
     if hasattr(t, 'nice'):
         return [t.user, t.nice, t.system, t.idle]
     else:
         return [t.user, 0, t.system, t.idle]
Example #44
import psutil
import sys

if len(sys.argv) <= 1:
    print("Enter param 'cpu' or 'mem' to get metrics")
    quit()

if sys.argv[1] == "cpu":
    cpu = psutil.cpu_times()
    systemCpu = "system.cpu."
    idle = "idel " + str(cpu.idle)
    user = "******" + str(cpu.user)
    guest = "guest " + str(cpu.guest)
    iowait = "iowait " + str(cpu.iowait)
    stolen = "stolen " + str(cpu.steal)
    system = "system " + str(cpu.system)
    cpuArray = [idle, user, guest, iowait, stolen, system]
    for x in cpuArray:
        print(systemCpu + x)

if sys.argv[1] == "mem":
    mem = psutil.virtual_memory()
    swap = psutil.swap_memory()
    virtualStr = "virtual "
    swapStr = "swap "
    vtotal = "total " + str(mem.total)
    vused = "used " + str(mem.used)
    vfree = "free " + str(mem.free)
    vshared = "shared " + str(mem.shared)
    virtualArray = [vtotal, vused, vfree, vshared]
    stotal = "total " + str(swap.total)
Example #45
File: system.py Project: tgd1973/aaPanel
 def get_cpu_time(self):
     cpu_time = 0.00
     cpu_times = psutil.cpu_times()
     for s in cpu_times:
         cpu_time += s
     return cpu_time
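Since the returned named tuple is iterable, return sum(psutil.cpu_times()) would be an equivalent one-liner; Example #53 uses the same sum() idiom to compute total CPU time.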
Example #46
    async def info(self, ctx, *args: str):
        """Summary of cpu, memory, disk and network information
         Usage: info [option]
         Examples:
             sysinfo           Shows all available info
             sysinfo cpu       Shows CPU usage
             sysinfo memory    Shows memory usage
             sysinfo file      Shows full path of open files
             sysinfo disk      Shows disk usage
             sysinfo network   Shows network usage
             sysinfo boot      Shows boot time
         """

        options = ('cpu', 'memory', 'file', 'disk', 'network', 'boot')

        # CPU
        cpu_count_p = psutil.cpu_count(logical=False)
        cpu_count_l = psutil.cpu_count()
        if cpu_count_p is None:
            cpu_count_p = "N/A"
        cpu_cs = ("CPU Count"
                  "\n\t{0:<9}: {1:>3}".format("Physical", cpu_count_p) +
                  "\n\t{0:<9}: {1:>3}".format("Logical", cpu_count_l))
        psutil.cpu_percent(interval=None, percpu=True)
        await asyncio.sleep(1)
        cpu_p = psutil.cpu_percent(interval=None, percpu=True)
        cpu_ps = ("CPU Usage"
                  "\n\t{0:<8}: {1}".format("Per CPU", cpu_p) +
                  "\n\t{0:<8}: {1:.1f}%".format("Overall",
                                                sum(cpu_p) / len(cpu_p)))
        cpu_t = psutil.cpu_times()
        width = max([
            len("{:,}".format(int(n)))
            for n in [cpu_t.user, cpu_t.system, cpu_t.idle]
        ])
        cpu_ts = ("CPU Times"
                  "\n\t{0:<7}: {1:>{width},}".format(
                      "User", int(cpu_t.user), width=width) +
                  "\n\t{0:<7}: {1:>{width},}".format(
                      "System", int(cpu_t.system), width=width) +
                  "\n\t{0:<7}: {1:>{width},}".format(
                      "Idle", int(cpu_t.idle), width=width))

        # Memory
        mem_v = psutil.virtual_memory()
        width = max([
            len(self._size(n)) for n in
            [mem_v.total, mem_v.available, (mem_v.total - mem_v.available)]
        ])
        mem_vs = ("Virtual Memory"
                  "\n\t{0:<10}: {1:>{width}}".format(
                      "Total", self._size(mem_v.total), width=width) +
                  "\n\t{0:<10}: {1:>{width}}".format(
                      "Available", self._size(mem_v.available), width=width) +
                  "\n\t{0:<10}: {1:>{width}} {2}%".format(
                      "Used",
                      self._size(mem_v.total - mem_v.available),
                      mem_v.percent,
                      width=width))
        mem_s = psutil.swap_memory()
        width = max([
            len(self._size(n))
            for n in [mem_s.total, mem_s.free, (mem_s.total - mem_s.free)]
        ])
        mem_ss = ("Swap Memory"
                  "\n\t{0:<6}: {1:>{width}}".format(
                      "Total", self._size(mem_s.total), width=width) +
                  "\n\t{0:<6}: {1:>{width}}".format(
                      "Free", self._size(mem_s.free), width=width) +
                  "\n\t{0:<6}: {1:>{width}} {2}%".format(
                      "Used",
                      self._size(mem_s.total - mem_s.free),
                      mem_s.percent,
                      width=width))

        # Open files
        open_f = psutil.Process().open_files()
        open_fs = "Open File Handles\n\t"
        if open_f:
            if hasattr(open_f[0], "mode"):
                open_fs += "\n\t".join(
                    ["{0} [{1}]".format(f.path, f.mode) for f in open_f])
            else:
                open_fs += "\n\t".join(["{0}".format(f.path) for f in open_f])
        else:
            open_fs += "None"

        # Disk usage
        disk_u = psutil.disk_usage(os.path.sep)
        width = max([
            len(self._size(n))
            for n in [disk_u.total, disk_u.free, disk_u.used]
        ])
        disk_us = (
            "Disk Usage"
            "\n\t{0:<6}: {1:>{width}}".format(
                "Total", self._size(disk_u.total), width=width) +
            "\n\t{0:<6}: {1:>{width}}".format(
                "Free", self._size(disk_u.free), width=width) +
            "\n\t{0:<6}: {1:>{width}} {2}%".format(
                "Used", self._size(disk_u.used), disk_u.percent, width=width))

        # Network
        net_io = psutil.net_io_counters()
        width = max([
            len(self._size(n)) for n in [net_io.bytes_sent, net_io.bytes_recv]
        ])
        net_ios = (
            "Network"
            "\n\t{0:<11}: {1:>{width}}".format(
                "Bytes sent", self._size(net_io.bytes_sent), width=width) +
            "\n\t{0:<11}: {1:>{width}}".format(
                "Bytes recv", self._size(net_io.bytes_recv), width=width))

        # Boot time
        boot_s = ("Boot Time"
                  "\n\t{0}".format(
                      datetime.datetime.fromtimestamp(
                          psutil.boot_time()).strftime("%Y-%m-%d %H:%M:%S")))

        # Output
        msg = ""
        if not args or args[0].lower() not in options:
            msg = "\n\n".join([
                cpu_cs, cpu_ps, cpu_ts, mem_vs, mem_ss, open_fs, disk_us,
                net_ios, boot_s
            ])
        elif args[0].lower() == 'cpu':
            msg = "\n" + "\n\n".join([cpu_cs, cpu_ps, cpu_ts])
        elif args[0].lower() == 'memory':
            msg = "\n" + "\n\n".join([mem_vs, mem_ss])
        elif args[0].lower() == 'file':
            msg = "\n" + open_fs
        elif args[0].lower() == 'disk':
            msg = "\n" + disk_us
        elif args[0].lower() == 'network':
            msg = "\n" + net_ios
        elif args[0].lower() == 'boot':
            msg = "\n" + boot_s
        await self._say(ctx, msg)
        return
Example #47
 def __init__(self):
     self.last = ps.cpu_times()
Example #48
# machineSpec; figure out a machine's specification, what hardware is here?
# This might grow to include some diagnostics, like exercise the HDD, ...
__author__ = 'dalem'

# Many current machine specifications can be found with psutil from here https://github.com/giampaolo/psutil
#TODO; how much memory
#TODO; what kind of processor
#TODO; performance whetstones counter

import os
import psutil
print "cpu times: " + str(psutil.cpu_times())
print "memory: " + str(psutil.TOTAL_PHYMEM)
print "disk: " + str(psutil.disk_usage(os.getcwd()))
Example #49
# coding:utf-8
'''
date: 2017-11-08
Functions, modules and packages; object orientation and design patterns;
exception handling; regular expressions; system scripting
'''

import datetime
from subprocess import PIPE

import psutil

print(psutil.cpu_times())
print(psutil.cpu_times())
print(psutil.cpu_times().user)
print(psutil.cpu_count())
print(psutil.cpu_count(logical=False))
print(psutil.cpu_times(percpu=True))

print(psutil.virtual_memory())
print(psutil.virtual_memory().total)
print(psutil.swap_memory())

print(psutil.disk_partitions())
print(psutil.disk_usage('/'))
print(psutil.disk_io_counters())
print(psutil.disk_io_counters(perdisk=True))

print(psutil.net_io_counters())
print(psutil.net_io_counters(pernic=True))

print(psutil.users())
Example #50
def perform_handoff(handoff_data):
    '''Perform VM handoff
    @param handoff_data: object of HandoffDataSend
    @return None
    '''
    global _handoff_start_time  # for testing purpose
    time_start = time.time()
    _handoff_start_time[0] = time_start
    LOG.info("control_network\tupdate start time: %f" % _handoff_start_time[0])

    CPU_MONITORING = False
    if CPU_MONITORING:
        cpu_stat_start = psutil.cpu_times(percpu=True)
    process_controller = process_manager.get_instance()
    overlay_mode = handoff_data.overlay_mode
    if overlay_mode is None:
        NUM_CPU_CORES = 2  # set CPU affinity
        VMOverlayCreationMode.LIVE_MIGRATION_STOP = VMOverlayCreationMode.LIVE_MIGRATION_FINISH_USE_SNAPSHOT_SIZE
        overlay_mode = VMOverlayCreationMode.get_pipelined_multi_process_finite_queue(
            num_cores=NUM_CPU_CORES)
        overlay_mode.COMPRESSION_ALGORITHM_TYPE = Const.COMPRESSION_GZIP
        overlay_mode.COMPRESSION_ALGORITHM_SPEED = 1
        overlay_mode.MEMORY_DIFF_ALGORITHM = "none"
        overlay_mode.DISK_DIFF_ALGORITHM = "none"

    # set affinity of VM not to disturb the migration
    #p = psutil.Process()
    #assigned_core_list = p.cpu_affinity()
    #excluded_core_list = list(set(range(psutil.cpu_count())) - set(assigned_core_list))
    # for proc in psutil.process_iter():
    #    if proc.name().lower().startswith("cloudlet_"):
    #        proc.cpu_affinity(excluded_core_list)
    #        LOG.debug("affinity\tset affinity of %s to %s" % (proc.name, excluded_core_list))

    process_controller.set_mode(overlay_mode, handoff_data.handoff_addr)
    LOG.info("* LIVE MIGRATION STRATEGY: %d" %
             VMOverlayCreationMode.LIVE_MIGRATION_STOP)
    LOG.info("* Overlay creation configuration")
    LOG.info("  - %s" % str(handoff_data.options))
    LOG.debug("* Overlay creation mode start\n%s" % str(overlay_mode))
    LOG.debug("* Overlay creation mode end")

    # sanity check
    if (handoff_data.options is None) or (isinstance(handoff_data.options,
                                                     Options) == False):
        msg = "Given option is invalid: %s" % str(handoff_data.options)
        raise HandoffError(msg)
    (base_disk, base_mem, base_diskmeta, base_memmeta) =\
        handoff_data.base_vm_paths

    # start CPU Monitor
    if CPU_MONITORING:
        cpu_monitor = CPUMonitor()
        cpu_monitor.start()

    memory_snapshot_queue = multiprocessing.Queue(
        overlay_mode.QUEUE_SIZE_MEMORY_SNAPSHOT)
    residue_deltalist_queue = multiprocessing.Queue(
        maxsize=overlay_mode.QUEUE_SIZE_OPTIMIZATION)
    compdata_queue = multiprocessing.Queue(
        maxsize=overlay_mode.QUEUE_SIZE_COMPRESSION)
    vm_monitor = VMMonitor(handoff_data, base_disk, base_mem)
    monitoring_info = vm_monitor.get_monitoring_info()
    time_ss = time.time()
    LOG.debug("[time] serialized step (%f ~ %f): %f" %
              (time_start, time_ss, (time_ss - time_start)))

    # QEMU control thread
    qmp_thread = QmpThread(handoff_data.qmp_channel_path, process_controller,
                           memory_snapshot_queue, compdata_queue, overlay_mode,
                           handoff_data._monitor)
    qmp_thread.daemon = True

    # memory snapshotting thread
    memory_read_proc = save_mem_snapshot(
        handoff_data._conn,
        handoff_data._vm_instance,
        memory_snapshot_queue,
        fuse_stream_monitor=handoff_data._monitor)
    if overlay_mode.PROCESS_PIPELINED == False:
        if overlay_mode.LIVE_MIGRATION_STOP is not VMOverlayCreationMode.LIVE_MIGRATION_FINISH_ASAP:
            msg = "Use ASAP VM stop for pipelined approach for serialized processing.\n"
            msg += "Otherwise it won't fininsh at the memory dumping stage"
            raise HandoffError(msg)
        time.sleep(5)
        qmp_thread.start()
        _waiting_to_finish(process_controller, "MemoryReadProcess")

    # process for getting VM overlay
    dedup_proc = create_delta_proc(
        monitoring_info, handoff_data.options, overlay_mode, base_disk,
        base_mem, base_memmeta, handoff_data.basedisk_hashdict,
        handoff_data.basemem_hashdict, handoff_data._resumed_disk,
        memory_snapshot_queue, residue_deltalist_queue, process_controller)
    time_dedup = time.time()
    if overlay_mode.PROCESS_PIPELINED == False:
        _waiting_to_finish(process_controller, "DeltaDedup")

    # process for compression
    LOG.info("Compressing overlay blobs")
    compress_proc = compression.CompressProc(residue_deltalist_queue,
                                             compdata_queue, overlay_mode)
    compress_proc.start()
    time_dedup = time.time()
    if overlay_mode.PROCESS_PIPELINED == False:
        _waiting_to_finish(process_controller, "CompressProc")

    migration_url = urlsplit(handoff_data.handoff_addr)
    if migration_url.scheme == "tcp":
        from .stream_client import StreamSynthesisClient
        url_value = migration_url.netloc.split(":")
        if len(url_value) == 1:
            migration_dest_ip = url_value[0]
            migration_dest_port = VMOverlayCreationMode.HANDOFF_DEST_PORT_DEFAULT
        elif len(url_value) == 2:
            migration_dest_ip = url_value[0]
            migration_dest_port = url_value[1]
        resume_disk_size = os.path.getsize(handoff_data._resumed_disk)

        # wait until getting the memory snapshot size
        resume_memory_size = -1
        LOG.debug("waiting to get memory size")
        while resume_memory_size < 0:
            resume_memory_size = memory_read_proc.get_memory_snapshot_size()
        time_memory_snapshot_size = time.time()
        LOG.debug("[time] Getting memory snapshot size (%f~%f):%f" %
                  (time_start, time_memory_snapshot_size,
                   (time_memory_snapshot_size - time_start)))
        if overlay_mode.PROCESS_PIPELINED:
            qmp_thread.start()

        metadata = dict()
        metadata[Const.META_BASE_VM_SHA256] = handoff_data.basevm_sha256_hash
        metadata[Const.META_RESUME_VM_DISK_SIZE] = resume_disk_size
        metadata[Const.META_RESUME_VM_MEMORY_SIZE] = resume_memory_size
        time_network_start = time.time()
        client = StreamSynthesisClient(migration_dest_ip, migration_dest_port,
                                       metadata, compdata_queue,
                                       process_controller)
        client.start()
        client.join()
        cpu_stat_end = psutil.cpu_times(percpu=True)
        time_network_end = time.time()
        LOG.debug("[time] Network transmission (%f~%f):%f" %
                  (time_network_start, time_network_end,
                   (time_network_end - time_network_start)))
        process_manager.kill_instance()

        # 7. terminating
        if handoff_data._monitor is not None:
            handoff_data._monitor.terminate()
            handoff_data._monitor.join()
        handoff_data._vm_instance = None  # prevent further access to the VM instance
        time_end = time.time()

        qmp_thread.join()
        migration_stop_command_time = qmp_thread.migration_stop_time
        vm_resume_time_at_dest = client.vm_resume_time_at_dest.value
        time_finish_transmission = client.time_finish_transmission.value
        LOG.debug("[time] migration stop time: %f" %
                  migration_stop_command_time)
        LOG.debug("[time] VM resume time at dest: %f" % vm_resume_time_at_dest)
        LOG.debug("[time] migration downtime: %f" %
                  (vm_resume_time_at_dest - migration_stop_command_time))
        LOG.debug("[time] Start ~ Finish tranmission (%f ~ %f): %f" %
                  (time_start, time_finish_transmission,
                   (time_finish_transmission - time_start)))
        LOG.debug("[time] Start ~ Finish migration (%f ~ %f): %f" %
                  (time_start, vm_resume_time_at_dest,
                   (vm_resume_time_at_dest - time_start)))
        if CPU_MONITORING:
            # measure CPU usage
            cpu_monitor.terminate()
            cpu_monitor.join()
            avg_cpu_usage = cpu_monitor.average_cpu_time(
                time_start, time_finish_transmission, assigned_core_list)
            LOG.debug("cpu_usage\t%f\taverage\t%s" %
                      (time.time(), avg_cpu_usage))
            # measure CPU time
            cpu_user_time = 0.0
            cpu_sys_time = 0.0
            cpu_idle_time = 0.0
            for core_index in assigned_core_list:
                cpu_time_start = cpu_stat_start[core_index]
                cpu_time_end = cpu_stat_end[core_index]
                cpu_user_time += (cpu_time_end[0] - cpu_time_start[0])
                cpu_sys_time += (cpu_time_end[2] - cpu_time_start[2])
                cpu_idle_time += (cpu_time_end[3] - cpu_time_start[3])
            cpu_total_time = cpu_user_time + cpu_sys_time
            LOG.debug("cpu_usage\t%f\tostime\t%s\t%f\t%f %%(not accurate)" %
                      (time.time(), assigned_core_list, cpu_total_time, 100.0 *
                       cpu_total_time / (cpu_total_time + cpu_idle_time)))
        _handoff_start_time[0] = sys.maxsize
    elif migration_url.scheme == "file":
        residue_zipfile = str(migration_url.path)
        temp_compfile_dir = mkdtemp(prefix="cloudlet-comp-")
        synthesis_file = StreamSynthesisFile(handoff_data.basevm_sha256_hash,
                                             compdata_queue, temp_compfile_dir)
        synthesis_file.start()

        # wait until getting the memory snapshot size
        LOG.debug("waiting to get memory size")
        resume_memory_size = -1
        while resume_memory_size < 0:
            resume_memory_size = memory_read_proc.get_memory_snapshot_size()
            time.sleep(0.001)
        time_memory_snapshot_size = time.time()
        LOG.debug("[time] Getting memory snapshot size (%f~%f):%f" %
                  (time_start, time_memory_snapshot_size,
                   (time_memory_snapshot_size - time_start)))
        if overlay_mode.PROCESS_PIPELINED:
            qmp_thread.start()

        # wait to finish creating files
        synthesis_file.join()
        time_end_transfer = time.time()
        LOG.debug("[time] Time for finishing transferring (%f ~ %f): %f" %
                  (time_start, time_end_transfer,
                   (time_end_transfer - time_start)))

        overlay_info, overlay_files = synthesis_file.get_overlay_info()
        overlay_metapath = os.path.join(os.getcwd(), Const.OVERLAY_META)
        overlay_metafile = _generate_overlaymeta(
            overlay_metapath, overlay_info, handoff_data.basevm_sha256_hash,
            os.path.getsize(handoff_data._resumed_disk), resume_memory_size)

        # packaging VM overlay into a single zip file
        VMOverlayPackage.create(residue_zipfile, overlay_metafile,
                                overlay_files)

        # terminating
        qmp_thread.join()
        process_manager.kill_instance()
        memory_read_proc.finish()  # deallocate resources for snapshotting
        # 7. terminating
        if handoff_data._monitor is not None:
            handoff_data._monitor.terminate()
            handoff_data._monitor.join()
        handoff_data._vm_instance = None  # prevent further access to the VM instance
        if os.path.exists(overlay_metafile):
            os.remove(overlay_metafile)
        if os.path.exists(temp_compfile_dir):
            shutil.rmtree(temp_compfile_dir)
        time_end = time.time()
        LOG.debug("[time] Total residue creation time (%f ~ %f): %f" %
                  (time_start, time_end, (time_end - time_start)))

        if CPU_MONITORING:
            cpu_monitor.terminate()
            cpu_monitor.join()
            avg_cpu_usage = cpu_monitor.average_cpu_time(
                time_start, time_end_transfer, assigned_core_list)
            LOG.debug("cpu_usage\t%f\taverage\t%s" %
                      (time.time(), avg_cpu_usage))
        _handoff_start_time[0] = sys.maxsize
    return None
Example #51
# 实现系统监控,还可以跨平台使用,支持Linux/UNIX/OSX/Windows
import psutil

# 获取CPU信息

print(psutil.cpu_count())  # CPU逻辑数量

print(psutil.cpu_count(logical=False))  # CPU物理核心

# 统计CPU的用户/系统/空闲时间:
print(psutil.cpu_times())

# 每秒刷新一次,累计10次:
for i in range(1):
    # for i in range(10):
    print(psutil.cpu_percent(interval=1, percpu=True))

# 获取内存信息

# 物理内存
print('物理内存', psutil.virtual_memory())

# 交换内存
print('交换内存', psutil.swap_memory())

# 获取磁盘信息
print(psutil.disk_partitions())  # 磁盘分区信息

print(psutil.disk_usage('/'))  # 磁盘使用情况

print(psutil.disk_io_counters())  # 磁盘IO
Example #52
#This program outputs your computer's current CPU usage in a readable format

#psutil is a library for retrieving information about processes and system utilization
import psutil

#Every attribute of psutil.cpu_times() represents seconds the CPU has spent in a given mode

#User is time spent by normal processes executing in user mode
userCPU = psutil.cpu_times().user

if userCPU < 60:
    print 'Time spent in normal processes: ', userCPU, 'sec'
else:
    print 'Time spent in normal processes: ', str(round(userCPU / 60,
                                                        2)), 'mins'

#Idle is time spent doing nothing
idleCPU = psutil.cpu_times().idle

if idleCPU < 60:
    print 'Time spent doing nothing: ', idleCPU, 'sec'
else:
    print 'Time spent doing nothing: ', str(round(idleCPU / 60, 2)), 'mins'

#System is time spent by processes executing in kernel mode
systemCPU = psutil.cpu_times().system

if systemCPU < 60:
    print 'Time spent executing in kernel mode: ', systemCPU, 'sec'
else:
    print 'Time spent executing in kernel mode: ', str(round(
Example #53
def gather_stats(output_dir):
    now = int(time.time())
    fname = output_dir / f'{now}.csv'
    with open(fname, 'w') as f:
        writer = csv.writer(f)
        writer.writerow([
            'pid', 'name', 'user', 'cpu_user', 'cpu_sys', 'memory_rss',
            'memory_vms', 'docker', 'cmdline'
        ])

        for process in psutil.process_iter():
            try:
                with process.oneshot():
                    cpu = process.cpu_times()
                    mem = process.memory_info()
                    docker = is_docker(process.pid)
                    cmdline = " ".join(process.cmdline())
                    writer.writerow([
                        process.pid,
                        process.name(),
                        process.username(), cpu.user, cpu.system, mem.rss,
                        mem.vms, docker, cmdline
                    ])
            except Exception:
                # got an exception, assume the process went away and then we
                # don't really care much
                get_logger().debug(
                    "exception gathering processing information, skipping process: %s",
                    sys.exc_info()[0])

        system_cpu = psutil.cpu_times()
        total_uptime = sum(system_cpu)
        writer.writerow([
            -1, "system_total", "system_total", total_uptime, -1,
            psutil.virtual_memory().total,
            psutil.swap_memory().total, False, ""
        ])

    # capture raw network stats to compare with MAP network collection
    fname_net = output_dir / f'{now}_net.csv'
    with open(fname_net, 'w') as f_net:
        writer_net = csv.writer(f_net)

        # explanation of stats
        # https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-class-net-statistics
        stats = [
            'rx_bytes', 'rx_errors', 'rx_dropped', 'rx_fifo_errors',
            'rx_missed_errors', 'tx_bytes', 'tx_errors', 'tx_dropped',
            'tx_fifo_errors'
        ]

        header = ['interface']
        header.extend(stats)
        writer_net.writerow(header)

        sys_net = Path('/sys/class/net')
        for interface_dir in sys_net.iterdir():
            row = list()

            interface = interface_dir.name
            row.append(interface)

            for stat in stats:
                stats_file = interface_dir / f'statistics/{stat}'
                if stats_file.exists():
                    row.append(stats_file.read_text().strip())
                else:
                    row.append("")

            writer_net.writerow(row)

    # capture routing table
    fname_route = output_dir / f'{now}_routes.log'
    with open(fname_route, 'w') as f:
        subprocess.run(["ip", "route", "show"], stdout=f)
Example #54
 def __init__(self):
     threading.Thread.__init__(self)
     self.running = True
     self.max_buffer_len = 20
     self.max_data_buffer_len = int(config.WAITTIME / config.MONITOR_PERIOD)
     self.period_data = []  # data that was polled is stored here...
     self.data_lock = threading.Lock()
     self.previous_time = time.time()
     self.previous_network_stats = psutil.net_io_counters(pernic=True)
     psutil.cpu_percent()
     self.tasks_to_monitor = []
     self.tasks_lock = threading.Lock()
     self.task_data = {}
     self.past_disk_stats = psutil.disk_io_counters()
     self.past_cpu_stats = psutil.cpu_times()
     self.capacity = self.get_capacity()
     self.estimated_utilization = {
         'cpu': EWMA(int(config.WAITTIME / config.MONITOR_PERIOD), 0.8),
         'memory': EWMA(int(config.WAITTIME / config.MONITOR_PERIOD), 0.85),
         'network': EWMA(int(config.WAITTIME / config.MONITOR_PERIOD), 0.8)
     }
     self.profile = False
     # monitor all management processes
     self.ch_ttime = 0
     self.ch_systime = 0
     self.myproc = psutil.Process()
     self.myproc_stats = self.myproc.cpu_times()
     self.tasks_to_monitor.append(self.myproc.pid)
     self.task_data[self.myproc.pid] = []
     if config.MEMFS_PROCESS != -1:
         self.memfs_proc1 = psutil.Process(config.MEMFS_PROCESS)
         self.memfs_proc1.cpu_affinity([0, 1, 2])
         self.memfs_proc1_stats = self.memfs_proc1.cpu_times()
         try:
             self.task_data[config.MEMFS_PROCESS] = []
             self.tasks_to_monitor.append(config.MEMFS_PROCESS)
         except:
             self.memfs_proc1 = None
     else:
         self.memfs_proc1 = None
     if config.MEMFS2_PROCESS != -1:
         self.memfs_proc2 = []
         self.memfs_proc2_stats = []
         proc = psutil.Popen("ps -e | grep memcachefs",
                             shell=True,
                             stdout=PIPE,
                             stderr=PIPE)
         out, err = proc.communicate()
         for line in out.split('\n'):
             tokens = line.split(' ')
             if len(tokens) < 2:
                 continue
             if tokens[0] == '':
                 continue
             memfs_process = psutil.Process(int(tokens[0]))
             self.memfs_proc2.append(memfs_process)
             try:
                 self.memfs_proc2_stats.append(memfs_process.cpu_times())
                 self.task_data[tokens[0]] = []
                 self.tasks_to_monitor.append(tokens[0])
             except:
                 self.memfs_proc2 = []
                 self.memfs_proc2_stats = []
         self.max_buffer_len = self.max_buffer_len + \
             1 + len(self.memfs_proc2)
     else:
         self.memfs_proc2 = None
         self.memfs_proc2_stats = None
Example #55
import psutil
import datetime
# Get system performance information (CPU, memory, disk, network)

## CPU information
### User Time: percentage of time spent running user processes
### System Time: percentage of time spent running kernel processes and interrupts
### Wait IO: percentage of time the CPU sits idle waiting on IO
### Idle: percentage of time the CPU is idle

# cpu_times returns the full CPU breakdown; pass percpu=True to show every logical CPU
psutil.cpu_times()  # summed across all CPUs
psutil.cpu_times(percpu=True)  # shown per CPU
# Number of logical CPUs (logical defaults to True)
psutil.cpu_count()
# Number of physical CPUs
psutil.cpu_count(logical=False)

## Memory information
### total: total memory
### used: memory in use
### free: free memory
### buffers: memory used for buffers
### cache: memory used for cache
### swap: swap space in use
psutil.virtual_memory()  # full memory details
psutil.swap_memory()  # SWAP partition details

## Disk information
### disk usage
psutil.disk_usage('/')  # the original omitted the required path argument
Example #56
log_file = log.create_log_file(output_folder + "log.txt")

main_sim = simulator.Simulator()
main_sim.loader(sim_path, log_file)


log_file.write('Grid Size: {} - {} Items - {} Agents - {} Obstacles\n'.format(
    main_sim.dim_w, len(main_sim.items), len(main_sim.agents),
    len(main_sim.obstacles)))
log.write_configurations(log_file, sim_configuration)
log_file.write('***** Initial map *****\n')
log.write_map(log_file, main_sim)

# ============= Simulation Initialization ==================
# 1. Log Variables Init
begin_time = time.time()
begin_cpu_time = psutil.cpu_times()
used_mem_before = psutil.virtual_memory().used
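# (Hedged sketch, not part of the original script: the matching end-of-run
# bookkeeping would subtract these baselines, e.g.
#   end_cpu_time = psutil.cpu_times()
#   log_file.write('wall time: {:.2f}s\n'.format(time.time() - begin_time))
#   log_file.write('cpu user time: {:.2f}s\n'.format(
#       end_cpu_time.user - begin_cpu_time.user))
#   log_file.write('memory delta: {} bytes\n'.format(
#       psutil.virtual_memory().used - used_mem_before)))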

# 2. Sim estimation Init
polynomial_degree = 4
agents_parameter_estimation = []
agents_previous_step_info = []
search_tree = None
enemy_search_tree = None
enemy_action_prob = None


# 3. Ad hoc Agents
if main_sim.main_agent is not None:
    main_agent = main_sim.main_agent
Example #57
0
def timer():
    return sum(psutil.cpu_times())

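# Usage sketch (an assumption about intent, not from the source): since
# sum(psutil.cpu_times()) is total machine CPU seconds across all states,
# timer() can be handed to timeit to measure system-wide CPU time rather
# than wall time:
#   import timeit
#   cpu_seconds = timeit.timeit('sum(range(10**6))', timer=timer, number=10)
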
def run_once_pool(data_dict, input_shape, pool_size, strides, padding,
                  max_pool_flag):

    model = get_only_pooling_model(input_shape,
                                   pool_size=pool_size,
                                   strides=strides,
                                   padding=padding,
                                   max_pool_flag=max_pool_flag)

    # build a callable for the current model layer
    current_layer = model.layers[1]
    print('current_layer.name', current_layer.name)
    f_part = K.function(
        [current_layer.input, K.learning_phase()], [current_layer.output])

    # create the input data
    input_shape = model.layers[1].input_shape[1:]
    print('input_shape ', input_shape)
    input_data = np.random.rand(*input_shape)
    input_data = [np.asarray(input_data).reshape((1, *input_shape))]

    # output size
    output_shape = model.layers[1].output_shape[1:]
    print('output_shape ', output_shape)

    # run twice up front; the first call includes one-time setup work
    layer_out = f_part(input_data + [0])[0]
    layer_out = f_part(input_data + [0])[0]

    # system information
    data = psutil.virtual_memory()
    # total memory in MB (bytes / 1024 / 1024)
    mem_total = data.total / 1024 / 1024
    # available memory in MB
    mem_free = data.available / 1024 / 1024
    # number of CPUs
    cpu_count = psutil.cpu_count()
    # CPU times: read the named fields instead of tuple-unpacking, since
    # cpu_times() returns extra fields on many platforms (iowait, irq, ...)
    cpu_t = psutil.cpu_times()
    user_cpu_times, nice_cpu_times, system_cpu_times, idle_cpu_times = (
        cpu_t.user, cpu_t.nice, cpu_t.system, cpu_t.idle)
    # CPU utilization
    cpu_percent = psutil.cpu_percent(interval=1)

    used_time = 0.0
    for _ in range(repeats):
        start = time.time()
        layer_out = f_part(input_data + [0])[0]
        end = time.time()
        used_time += (end - start) * 1000

    used_time = used_time / repeats
    print('used time ', used_time)

    data_dict['label'].append(used_time)
    data_dict['mem_total'].append(mem_total)
    data_dict['mem_free'].append(mem_free)
    data_dict['cpu_count'].append(cpu_count)
    data_dict['cpu_percent'].append(cpu_percent)
    data_dict['user_cpu_times'].append(user_cpu_times)
    data_dict['nice_cpu_times'].append(nice_cpu_times)
    data_dict['system_cpu_times'].append(system_cpu_times)
    data_dict['idle_cpu_times'].append(idle_cpu_times)

    data_dict['input_width'].append(input_shape[0])
    data_dict['input_height'].append(input_shape[1])
    data_dict['input_channel'].append(input_shape[2])
    data_dict['output_width'].append(output_shape[0])
    data_dict['output_height'].append(output_shape[1])
    data_dict['output_channel'].append(output_shape[2])
    data_dict['pool_size_width'].append(pool_size[0])
    data_dict['pool_size_height'].append(pool_size[1])
    data_dict['strides_width'].append(strides[0])
    data_dict['strides_height'].append(strides[1])
    data_dict['padding_type'].append(padding)
    data_dict['max_pool_flag'].append(max_pool_flag)
Example #59
0
    def get_current(self):
        """Build an instance of NodeStatsSnapshot.

        It collects information about the usage of the main resources on
        the machine.

        Returns:
          A NodeStatsSnapshot object with a detailed breakdown of the
          resources used on the machine.
        """
        utc_timestamp = time.mktime(datetime.utcnow().timetuple())
        private_ip = appscale_info.get_private_ip()

        # CPU usage
        cpu_times = psutil.cpu_times()
        cpu = NodeCPU(user=cpu_times.user,
                      system=cpu_times.system,
                      idle=cpu_times.idle,
                      percent=psutil.cpu_percent(),
                      count=psutil.cpu_count())

        # AvgLoad
        loadavg = NodeLoadAvg(*os.getloadavg())

        # Memory usage
        virtual = psutil.virtual_memory()
        memory = NodeMemory(total=virtual.total,
                            available=virtual.available,
                            used=virtual.used)

        # Swap usage
        swap_mem = psutil.swap_memory()
        swap = NodeSwap(total=swap_mem.total,
                        free=swap_mem.free,
                        used=swap_mem.used)

        # Disk usage
        partitions = psutil.disk_partitions()
        partitions_dict = {}
        for part in partitions:
            usage = psutil.disk_usage(part.mountpoint)
            partitions_dict[part.mountpoint] = NodePartition(total=usage.total,
                                                             used=usage.used,
                                                             free=usage.free)
        io_counters = psutil.disk_io_counters()
        disk_io = NodeDiskIO(read_count=io_counters.read_count,
                             write_count=io_counters.write_count,
                             read_bytes=io_counters.read_bytes,
                             write_bytes=io_counters.write_bytes,
                             read_time=io_counters.read_time,
                             write_time=io_counters.write_time)

        # Network usage
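        # (net_connections() may require elevated privileges on some
        # platforms, e.g. macOS, where it raises AccessDenied otherwise)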
        network_io = psutil.net_io_counters()
        network = NodeNetwork(bytes_sent=network_io.bytes_sent,
                              bytes_recv=network_io.bytes_recv,
                              packets_sent=network_io.packets_sent,
                              packets_recv=network_io.packets_recv,
                              errin=network_io.errin,
                              errout=network_io.errout,
                              dropin=network_io.dropin,
                              dropout=network_io.dropout,
                              connections_num=len(psutil.net_connections()))

        stats = NodeStatsSnapshot(utc_timestamp=utc_timestamp,
                                  private_ip=private_ip,
                                  cpu=cpu,
                                  memory=memory,
                                  swap=swap,
                                  disk_io=disk_io,
                                  partitions_dict=partitions_dict,
                                  network=network,
                                  loadavg=loadavg)
        if time.time() - self.last_debug > LOCAL_STATS_DEBUG_INTERVAL:
            NodeStatsSource.last_debug = time.time()
            logging.debug(stats)
        return stats
Example #60
0
File: cpu.py Project: leafknode/Diamond
    def collect(self):
        """
        Collect CPU stats
        """

        def cpu_time_list():
            """
            get cpu time list
            """

            statFile = open(self.PROC, "r")
            timeList = statFile.readline().split(" ")[2:6]
            for i in range(len(timeList)):
                timeList[i] = int(timeList[i])
            statFile.close()
            return timeList

        def cpu_delta_time(interval):
            """
            Get before and after cpu times for usage calc
            """
            pre_check = cpu_time_list()
            time.sleep(interval)
            post_check = cpu_time_list()
            for i in range(len(pre_check)):
                post_check[i] -= pre_check[i]
            return post_check

        if os.access(self.PROC, os.R_OK):

            # If simple only return aggregate CPU% metric
            if str_to_bool(self.config['simple']):
                dt = cpu_delta_time(self.INTERVAL)
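                # idle is the last of the four /proc/stat fields sampled, so
                # busy % is 100 minus idle's share of the interval delta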
                cpuPct = 100 - (dt[len(dt) - 1] * 100.00 / sum(dt))
                self.publish('percent', str('%.4f' % cpuPct))
                return True

            results = {}
            # Open file
            file = open(self.PROC)

            ncpus = -1  # don't count the aggregate 'cpu' (total) line
            for line in file:
                if not line.startswith('cpu'):
                    continue

                ncpus += 1
                elements = line.split()

                cpu = elements[0]

                if cpu == 'cpu':
                    cpu = 'total'
                elif not str_to_bool(self.config['percore']):
                    continue

                results[cpu] = {}

                if len(elements) >= 2:
                    results[cpu]['user'] = elements[1]
                if len(elements) >= 3:
                    results[cpu]['nice'] = elements[2]
                if len(elements) >= 4:
                    results[cpu]['system'] = elements[3]
                if len(elements) >= 5:
                    results[cpu]['idle'] = elements[4]
                if len(elements) >= 6:
                    results[cpu]['iowait'] = elements[5]
                if len(elements) >= 7:
                    results[cpu]['irq'] = elements[6]
                if len(elements) >= 8:
                    results[cpu]['softirq'] = elements[7]
                if len(elements) >= 9:
                    results[cpu]['steal'] = elements[8]
                if len(elements) >= 10:
                    results[cpu]['guest'] = elements[9]
                if len(elements) >= 11:
                    results[cpu]['guest_nice'] = elements[10]

            # Close File
            file.close()

            metrics = {}

            for cpu in results.keys():
                stats = results[cpu]
                for s in stats.keys():
                    # Get Metric Name
                    metric_name = '.'.join([cpu, s])
                    # Get actual data
                    if (str_to_bool(self.config['normalize'])
                            and cpu == 'total' and ncpus > 0):
                        metrics[metric_name] = self.derivative(
                            metric_name,
                            long(stats[s]),
                            self.MAX_VALUES[s]) / ncpus
                    else:
                        metrics[metric_name] = self.derivative(
                            metric_name,
                            long(stats[s]),
                            self.MAX_VALUES[s])

            # Check for a bug in xen where the idle time is doubled for guest
            # See https://bugzilla.redhat.com/show_bug.cgi?id=624756
            if self.config['xenfix'] is None or self.config['xenfix'] is True:
                if os.path.isdir('/host_proc/xen'):
                    total = 0
                    for metric_name in metrics.keys():
                        if 'cpu0.' in metric_name:
                            total += int(metrics[metric_name])
                    if total > 110:
                        self.config['xenfix'] = True
                        for mname in metrics.keys():
                            if '.idle' in mname:
                                metrics[mname] = float(metrics[mname]) / 2
                    elif total > 0:
                        self.config['xenfix'] = False
                else:
                    self.config['xenfix'] = False

            # Publish Metric Derivative
            for metric_name in metrics.keys():
                self.publish(metric_name,
                             metrics[metric_name])
            return True

        else:
            if not psutil:
                self.log.error('Unable to import psutil')
                self.log.error('No cpu metrics retrieved')
                return None

            cpu_time = psutil.cpu_times(percpu=True)
            cpu_count = len(cpu_time)
            total_time = psutil.cpu_times()
            for i in range(0, len(cpu_time)):
                metric_name = 'cpu' + str(i)
                self.publish(metric_name + '.user',
                             self.derivative(metric_name + '.user',
                                             cpu_time[i].user,
                                             self.MAX_VALUES['user']))
                if hasattr(cpu_time[i], 'nice'):
                    self.publish(metric_name + '.nice',
                                 self.derivative(metric_name + '.nice',
                                                 cpu_time[i].nice,
                                                 self.MAX_VALUES['nice']))
                self.publish(metric_name + '.system',
                             self.derivative(metric_name + '.system',
                                             cpu_time[i].system,
                                             self.MAX_VALUES['system']))
                self.publish(metric_name + '.idle',
                             self.derivative(metric_name + '.idle',
                                             cpu_time[i].idle,
                                             self.MAX_VALUES['idle']))

            metric_name = 'total'
            self.publish(metric_name + '.user',
                         self.derivative(metric_name + '.user',
                                         total_time.user,
                                         self.MAX_VALUES['user'])
                         / cpu_count)
            if hasattr(total_time, 'nice'):
                self.publish(metric_name + '.nice',
                             self.derivative(metric_name + '.nice',
                                             total_time.nice,
                                             self.MAX_VALUES['nice'])
                             / cpu_count)
            self.publish(metric_name + '.system',
                         self.derivative(metric_name + '.system',
                                         total_time.system,
                                         self.MAX_VALUES['system'])
                         / cpu_count)
            self.publish(metric_name + '.idle',
                         self.derivative(metric_name + '.idle',
                                         total_time.idle,
                                         self.MAX_VALUES['idle'])
                         / cpu_count)

            return True
