Example #1
    def __set_bases(self):
        """
        Calculate base stats for I/O at start of the node.
        Used to calculate difference to get per/s stats.
        """
        psutil.cpu_percent()
        psutil.cpu_percent(percpu=True)

        for interface in psutil.net_io_counters(True):

            netint = psutil.net_io_counters(True)[interface]
            total_bytes = netint.bytes_recv + netint.bytes_sent
            total_packages = netint.packets_sent + netint.packets_recv

            self.__bandwidth_base[interface] = total_bytes
            self.__msg_freq_base[interface] = total_packages

        dev_names = []
        for disk in psutil.disk_partitions():
            if all(['cdrom' not in disk.opts, 'sr' not in disk.device]):
                dev_names.append(disk.device)

        for key in psutil.disk_io_counters(True):
            if key in dev_names:
                disk = psutil.disk_io_counters(True)[key]
                self.__disk_read_base[key] = disk.read_bytes
                self.__disk_write_base[key] = disk.write_bytes
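The per-second side of this pattern is not shown above; a minimal sketch of how the stored bases might be consumed later (the method name and the elapsed-time argument are assumptions, the attribute names come from the snippet):

    def __get_net_rates(self, elapsed_seconds):
        """Hypothetical counterpart: diff current counters against the stored
        bases and divide by the elapsed time to get per-second rates."""
        rates = {}
        for interface, counters in psutil.net_io_counters(pernic=True).items():
            if interface in self.__bandwidth_base:
                total = counters.bytes_recv + counters.bytes_sent
                rates[interface] = (total - self.__bandwidth_base[interface]) / elapsed_seconds
        return rates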
Example #2
def loadMon(duration,interval,emulationID,emulationName,emuStartTime):
    
    HOMEPATH= Library.getHomepath()
    emulationName=str(emulationName)
    interval=int(interval)
    
    '''
    starting cpu monitoring in the loop
    '''
    iterationsNo=int(duration)/int(interval)
   
    try:
        f = open(HOMEPATH+"/logs/"+str(emulationID)+"-"+str(emulationName)+"-res"+"_"+str(emuStartTime)+".csv", 'a')    
        f.write(emulationName+";\nCountdown;Time;CPU(%);MEM(%);IOread(bytes);IOwrite(bytes);NET(bytes_sent)\n")
        #start time
        initTime=time.time()
        while iterationsNo !=0:
            CPU=str(psutil.cpu_percent(interval, False))
            MEM=str(psutil.virtual_memory().percent)
            IOr=str(psutil.disk_io_counters().read_bytes)
            IOw=str(psutil.disk_io_counters().write_bytes)
            NET=str(psutil.network_io_counters(False).bytes_sent)

            #print (emulationName+";\nTime;CPU(%);MEM(%);IOread(bytes);IOwrite(bytes);NET(bytes_sent)\n"+str(time.time())+";"+CPU+";"+MEM+";"+IOr+";"+IOw+";"+NET)
            probeTime=time.time()-initTime
            timeStamp=dt.now()
            
            f.write(str(int(probeTime))+";"+str(timeStamp.strftime("%Y-%m-%d %H:%M:%S.%f"))+";"+CPU+";"+MEM+";"+IOr+";"+IOw+";"+NET+"\n")

            iterationsNo=iterationsNo-1
    except Exception,e:
        print "Unable to create log file\nError: ",e
Example #3
def getSI(val=None):
    """
    Récupération de données système ajoutées dans uns liste

    Arguments:
    val -- métriques à acquérir
    """

    if val == None:
        val = dict()

        val["time"] = []
        val["mem"] = []
        val["swap"] = []
        val["io_read"] = []
        val["io_write"] = []
        val["net_sent"] = []
        val["net_recv"] = []
        val["cpu"] = []

    val["time"] += [time.time()]
    val["mem"] += [psutil.virtual_memory()[2]]
    val["swap"] += [psutil.swap_memory()[3]]
    val["io_read"] += [psutil.disk_io_counters(perdisk=False)[2]]
    val["io_write"] += [psutil.disk_io_counters(perdisk=False)[3]]
    val["net_sent"] += [psutil.network_io_counters(pernic=False)[0]]
    val["net_recv"] += [psutil.network_io_counters(pernic=False)[1]]
    val["cpu"] += [psutil.cpu_percent(interval=0.8)]

    return val
Example #4
def test_workers(c, s, a, b):
    d = workers(s)

    assert json.loads(json.dumps(d)) == d

    assert 0 <= d[a.ip]['cpu'] <= 100
    assert 0 <= d[a.ip]['memory']
    assert 0 < d[a.ip]['memory_percent'] < 100
    assert set(map(int, d[a.ip]['ports'])) == {a.port, b.port}
    assert d[a.ip]['processing'] == {}
    # assert d[a.ip]['last-seen'] > 0

    L = c.map(div, range(10), range(10))
    yield _wait(L)

    assert 0 <= d[a.ip]['cpu'] <= 100
    assert 0 <= d[a.ip]['memory']
    assert 0 < d[a.ip]['memory_percent'] < 100
    assert set(map(int, d[a.ip]['ports'])) == {a.port, b.port}
    assert d[a.ip]['processing'] == {}
    try:
        assert 0 <= d[a.ip]['disk-read']
        assert 0 <= d[a.ip]['disk-write']
    except KeyError:
        import psutil
        with pytest.raises(RuntimeError):
            psutil.disk_io_counters()

    assert 0 <= d[a.ip]['network-send']
    assert 0 <= d[a.ip]['network-recv']
Example #5
def api_per_disk_io(interval=1):
    try:
        before_per_disk_io = psutil.disk_io_counters(perdisk=True)
        sleep(interval)
        after_per_disk_io = psutil.disk_io_counters(perdisk=True)

        disks = list()
        for name in after_per_disk_io:
            disk_after = after_per_disk_io[name]
            disk_before = before_per_disk_io[name]
            read_per = disk_after.read_bytes - disk_before.read_bytes
            write_per = disk_after.write_bytes - disk_before.write_bytes
            total = read_per + write_per

            disks.append({
                'name': name,
                'read': read_per / interval,
                'write': write_per / interval,
                'total': total / interval
            })

        return jsonify({
            'status': True,
            'data': disks
        })
    except Exception as e:
        return jsonify({
            'status': False,
            'err': str(e)
        })
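The snippet assumes a Flask app around it: jsonify comes from flask and sleep from time, and the function presumably backs a route. A sketch of that wiring, under those assumptions:

from time import sleep

import psutil
from flask import Flask, jsonify

app = Flask(__name__)
# hypothetical registration of the handler above as an endpoint
app.add_url_rule('/api/disk-io', 'api_per_disk_io', api_per_disk_io)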
Example #6
def get_disk_iostat(device=None):
	'''
	write_time and read_time are in milliseconds
	'''
	if not device:
		return ps.disk_io_counters(perdisk=False)
	return ps.disk_io_counters(perdisk=True).get(device)
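A quick usage sketch for the function above (the device name is an assumption and varies by platform):

total = get_disk_iostat()                 # aggregate counters for all disks
sda = get_disk_iostat(device='sda')       # one disk's counters, or None if it is absent
print(total.read_bytes, total.write_bytes, total.read_time, total.write_time)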
Example #7
    def __get_resource_stats(self):
        """
        Get network and disk counters
        :return: tuple
        """
        if not self.__counters_ts:
            self.__disk_counters = psutil.disk_io_counters()
            self.__net_counters = psutil.net_io_counters()
            self.__counters_ts = datetime.datetime.now()
            time.sleep(0.2)  # small enough for human, big enough for machine

        now = datetime.datetime.now()
        interval = (now - self.__counters_ts).total_seconds()

        net = psutil.net_io_counters()
        tx_bytes = (net.bytes_sent - self.__net_counters.bytes_sent) / interval
        rx_bytes = (net.bytes_recv - self.__net_counters.bytes_recv) / interval
        self.__net_counters = net

        disk = psutil.disk_io_counters()
        dru = (disk.read_bytes - self.__disk_counters.read_bytes) / interval
        dwu = (disk.write_bytes - self.__disk_counters.write_bytes) / interval
        self.__disk_counters = disk

        self.__counters_ts = now
        return rx_bytes, tx_bytes, dru, dwu
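Read standalone, the method boils down to sampling the counters twice and dividing by the elapsed time; a self-contained sketch of the same idea (the interval and variable names are mine, not the original project's):

import time

import psutil

net0, disk0, t0 = psutil.net_io_counters(), psutil.disk_io_counters(), time.time()
time.sleep(1.0)
net1, disk1, t1 = psutil.net_io_counters(), psutil.disk_io_counters(), time.time()
elapsed = t1 - t0
print("rx B/s:", (net1.bytes_recv - net0.bytes_recv) / elapsed)
print("tx B/s:", (net1.bytes_sent - net0.bytes_sent) / elapsed)
print("disk read B/s:", (disk1.read_bytes - disk0.read_bytes) / elapsed)
print("disk write B/s:", (disk1.write_bytes - disk0.write_bytes) / elapsed)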
Example #8
def get_disk_state():
    tot_before = psutil.disk_io_counters()
    # sleep some time  
    time.sleep(interval)  
    tot_after = psutil.disk_io_counters() 
    return {"disk_read": bytes2human((tot_after.read_bytes - tot_before.read_bytes)/ interval), 
            "disk_write": bytes2human((tot_after.write_bytes - tot_before.write_bytes)/ interval)}
Example #9
    def test_disk_io_counters(self):
        def check_ntuple(nt):
            self.assertEqual(nt[0], nt.read_count)
            self.assertEqual(nt[1], nt.write_count)
            self.assertEqual(nt[2], nt.read_bytes)
            self.assertEqual(nt[3], nt.write_bytes)
            if not (OPENBSD or NETBSD):
                self.assertEqual(nt[4], nt.read_time)
                self.assertEqual(nt[5], nt.write_time)
                if LINUX:
                    self.assertEqual(nt[6], nt.read_merged_count)
                    self.assertEqual(nt[7], nt.write_merged_count)
                    self.assertEqual(nt[8], nt.busy_time)
                elif FREEBSD:
                    self.assertEqual(nt[6], nt.busy_time)
            for name in nt._fields:
                assert getattr(nt, name) >= 0, nt

        ret = psutil.disk_io_counters(perdisk=False)
        assert ret is not None, "no disks on this system?"
        check_ntuple(ret)
        ret = psutil.disk_io_counters(perdisk=True)
        # make sure there are no duplicates
        self.assertEqual(len(ret), len(set(ret)))
        for key in ret:
            assert key, key
            check_ntuple(ret[key])
Example #10
File: base.py Project: mdavid/packs
def check_diskio():
    dm = False
    disk_map = {}
    try:
        # total io counters
        diskio_all = psutil.disk_io_counters()
        for k, v in diskio_all._asdict().iteritems():
            disk_map["disk." + k] = v
        # per disk io counters
        diskio_per_disk = psutil.disk_io_counters(perdisk=True)
        for device, details in diskio_per_disk.iteritems():
            for k, v in diskio_per_disk[device]._asdict().iteritems():
                disk_map["disk." + device.lower() + "." + k] = v
    except RuntimeError:  # Windows needs disk stats turned on with 'diskperf -y'
        pass
    # check for any device mapper partitions
    for partition in psutil.disk_partitions():
        if '/dev/mapper' in partition.device:
            dm = True
    # per device mapper friendly name io counters
    if dm:
        device_mapper = {}
        for name in os.listdir('/dev/mapper'):
            path = os.path.join('/dev/mapper', name)
            if os.path.islink(path):
                device_mapper[os.readlink(os.path.join('/dev/mapper', name)).replace('../', '')] = name
        for device, details in diskio_per_disk.iteritems():
            for k, v in diskio_per_disk[device]._asdict().iteritems():
                if device in device_mapper:
                    disk_map["disk." + device_mapper[device] + "." + k] = v
    return disk_map
Example #11
    def collect(self):
        # Collect metrics for all devices
        if self._sum:
            try:
                curr = psutil.disk_io_counters(perdisk=False)
            except:
                # Not enough permissions
                curr = self._last_sum = None
            if self._last_sum:
                self._construct('sum', curr, self._last_sum)
            self._last_sum = curr

        # Collect metrics for each individual device
        if self._all or self._devices:
            try:
                curr_all = psutil.disk_io_counters(perdisk=True)
            except:
                # Typically not enough permissions
                curr_all = self._last = None
            if self._last:
                for curr_device in curr_all:
                    if self._all or curr_device in self._devices:
                        try:
                            curr = curr_all[curr_device]
                            last = self._last[curr_device]
                        except KeyError:
                            continue
                        self._construct(curr_device, curr, last)
            self._last = curr_all
Example #12
    def test_disk_io_counters(self):
        def check_ntuple(nt):
            self.assertEqual(nt[0], nt.read_count)
            self.assertEqual(nt[1], nt.write_count)
            self.assertEqual(nt[2], nt.read_bytes)
            self.assertEqual(nt[3], nt.write_bytes)
            self.assertEqual(nt[4], nt.read_time)
            self.assertEqual(nt[5], nt.write_time)
            assert nt.read_count >= 0, nt
            assert nt.write_count >= 0, nt
            assert nt.read_bytes >= 0, nt
            assert nt.write_bytes >= 0, nt
            assert nt.read_time >= 0, nt
            assert nt.write_time >= 0, nt

        ret = psutil.disk_io_counters(perdisk=False)
        check_ntuple(ret)
        ret = psutil.disk_io_counters(perdisk=True)
        # make sure there are no duplicates
        self.assertEqual(len(ret), len(set(ret)))
        for key in ret:
            assert key, key
            check_ntuple(ret[key])
            if LINUX and key[-1].isdigit():
                # if 'sda1' is listed 'sda' shouldn't, see:
                # https://github.com/giampaolo/psutil/issues/338
                while key[-1].isdigit():
                    key = key[:-1]
                self.assertNotIn(key, ret.keys())
Example #13
    def test_disk_io_counters(self):
        def check_ntuple(nt):
            self.assertEqual(nt[0], nt.read_count)
            self.assertEqual(nt[1], nt.write_count)
            self.assertEqual(nt[2], nt.read_bytes)
            self.assertEqual(nt[3], nt.write_bytes)
            if not (OPENBSD or NETBSD):
                self.assertEqual(nt[4], nt.read_time)
                self.assertEqual(nt[5], nt.write_time)
                if LINUX:
                    self.assertEqual(nt[6], nt.read_merged_count)
                    self.assertEqual(nt[7], nt.write_merged_count)
                    self.assertEqual(nt[8], nt.busy_time)
                elif FREEBSD:
                    self.assertEqual(nt[6], nt.busy_time)
            for name in nt._fields:
                assert getattr(nt, name) >= 0, nt

        ret = psutil.disk_io_counters(perdisk=False)
        check_ntuple(ret)
        ret = psutil.disk_io_counters(perdisk=True)
        # make sure there are no duplicates
        self.assertEqual(len(ret), len(set(ret)))
        for key in ret:
            assert key, key
            check_ntuple(ret[key])
            if LINUX and key[-1].isdigit():
                # if 'sda1' is listed 'sda' shouldn't, see:
                # https://github.com/giampaolo/psutil/issues/338
                while key[-1].isdigit():
                    key = key[:-1]
                self.assertNotIn(key, ret.keys())
Example #14
    def disk_io_counters(self):
        """ disk io counters """

        real_block_devs = host.block_devices()
        disk_counters = {'__all__': psutil.disk_io_counters(perdisk=False)}
        disk_counters.update(psutil.disk_io_counters(perdisk=True))

        simple_metrics = {
            'write_count': ['system.io.iops_w', 1, self.statsd.incr],
            'write_bytes': ['system.io.kbs_w', 1024, self.statsd.incr],
            'read_count': ['system.io.iops_r', 1, self.statsd.incr],
            'read_bytes': ['system.io.kbs_r', 1024, self.statsd.incr],
        }

        complex_metrics = {
            'write_time': ['system.io.wait_w', 1, self.statsd.gauge],
            'read_time': ['system.io.wait_r', 1, self.statsd.gauge],
        }

        for disk, io in disk_counters.iteritems():
            # do not process virtual devices
            disk_is_physical = False
            for real_dev_name in real_block_devs:
                if disk.startswith(real_dev_name):
                    disk_is_physical = True
            if not disk_is_physical:
                continue

            for method, description in simple_metrics.iteritems():
                new_stamp, new_value = time.time(), getattr(io, method)
                prev_stamp, prev_value = self.previous_values.get(disk, {}).get(method, [None, None])

                if prev_stamp and new_value >= prev_value:
                    metric_name, value_divider, stat_func = description
                    delta_value = (new_value - prev_value) / value_divider
                    metric_full_name = metric_name if disk == '__all__' else '%s|%s' % (metric_name, disk)
                    stat_func(metric_full_name, delta_value)

                    if method == 'write_count':
                        complex_metrics['write_time'][1] = delta_value
                    elif method == 'read_count':
                        complex_metrics['read_time'][1] = delta_value

                self.previous_values[disk][method] = [new_stamp, new_value]

            for method, description in complex_metrics.iteritems():
                new_stamp, new_value = time.time(), getattr(io, method)
                prev_stamp, prev_value = self.previous_values.get(disk, {}).get(method, [None, None])

                if prev_stamp:
                    metric_name, value_divider, stat_func = description
                    if value_divider:
                        delta_value = (new_value - prev_value) / float(value_divider)
                    else:
                        delta_value = 0
                    metric_full_name = metric_name if disk == '__all__' else '%s|%s' % (metric_name, disk)
                    stat_func(metric_full_name, delta_value)

                self.previous_values[disk][method] = [new_stamp, new_value]
Example #15
 def _disk_monitor(self):
     """
         Bytes read or write in a disk
     """
     read = ps.disk_io_counters()[2]
     write = ps.disk_io_counters()[3]
     self._print("Read: " +str(read)+ ", Write: " + str(write))
     self.file.write(str(read)+";"+str(write)+"\n")
Example #16
 def _inner():
     a=psutil.disk_io_counters(perdisk=False)
     time.sleep(TASK_INTERVAL)
     b=psutil.disk_io_counters()
     read_iops = (b.read_count - a.read_count)/TASK_INTERVAL
     write_iops = (b.write_count - a.write_count)/TASK_INTERVAL
     read_rate = (b.read_bytes - a.read_bytes)/TASK_INTERVAL/1024/1024
     write_rate = (b.write_bytes - a.write_bytes)/TASK_INTERVAL/1024/1024
Example #17
	def disk_monitor(self, f):
		"""
			Bytes read or write in a disk
		"""
		read = ps.disk_io_counters()[2]
		write = ps.disk_io_counters()[3]
		print("Disk Read: " +str(read)+ ", Write: " + str(write))
		f.write(str(read)+","+str(write)+"\n")
Example #18
 def test_disk_io_counters_no_disks(self):
     # Emulate a case where no disks are installed, see:
     # https://github.com/giampaolo/psutil/issues/1062
     with mock.patch('psutil._psplatform.disk_io_counters',
                     return_value={}) as m:
         self.assertIsNone(psutil.disk_io_counters(perdisk=False))
         self.assertEqual(psutil.disk_io_counters(perdisk=True), {})
         assert m.called
Example #19
def get_disk_io():
    a=psutil.disk_io_counters()
    time.sleep(timeaa)
    b=psutil.disk_io_counters()
    read_iops = (b.read_count - a.read_count)/timeaa
    write_iops = (b.write_count - a.write_count)/timeaa
    read_rate = (b.read_bytes - a.read_bytes)/timeaa/1024/1024
    write_rate = (b.write_bytes - a.write_bytes)/timeaa/1024/1024
    print read_iops,write_iops,read_rate,write_rate
Example #20
def get_diskio_rate():
    diskio_ti = float(cf.get("collecting time interval", "diskio_ti"))
    read_io_old = psutil.disk_io_counters().read_bytes
    write_io_old = psutil.disk_io_counters().write_bytes
    time.sleep(diskio_ti)
    read_io_new = psutil.disk_io_counters().read_bytes
    write_io_new = psutil.disk_io_counters().write_bytes
    pgpgin_rate = (float(write_io_new) - write_io_old) / diskio_ti / 1024
    pgpgout_rate = (float(read_io_new) - read_io_old) / diskio_ti / 1024
    return {"pgpgin": float("%0.3f" % pgpgin_rate), "pgpgout": float("%0.3f" % pgpgout_rate)}
Example #21
def getDiskIOInfo():
    disks_before = psutil.disk_io_counters()
    time.sleep(1)
    disks_after = psutil.disk_io_counters()
    disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
    disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes
    wio = str( disks_write_per_sec)
    rio = str( disks_read_per_sec)
    w = '\"diskwrite\":\"' + wio + '\",\"' +"diskread\":\""+rio +'\",'
    return w
Example #22
def init(interval):
    global transferRate
    while 1:
        t0 = disk_io_counters()
        #Wait a moment to track the diff.
        time.sleep(interval)
        t1 = disk_io_counters()

        #Calculate kb/s
        transferRate["reads"]  = ((t1.read_bytes   - t0.read_bytes )  // (interval + 1)) // 1024
        transferRate["writes"] = ((t1.write_bytes  - t0.write_bytes)  // (interval + 1)) // 1024
Example #23
def updateDB():
    '''
        updates the db with CPU details
    '''
    timestamp = time.time()
    ioreadusage = psutil.disk_io_counters().read_count
    iowriteusage = psutil.disk_io_counters().write_count
    memoryusage = psutil.used_phymem()
    cpupercent = psutil.cpu_percent()
    cpuDetails  = CPUStatistics(timestamp=timestamp,ioreadusage=ioreadusage,iowriteusage=iowriteusage,memoryusage=memoryusage,cpupercent=cpupercent)
    cpuDetails.save()
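psutil.used_phymem() is a legacy call that was removed in later psutil releases; on a current install the rough equivalent (a substitution, not the original code) is:

memoryusage = psutil.virtual_memory().used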
Example #24
def get_stats(name, nic):
    global prev_recv
    global prev_sent
    global prev_read
    global prev_write

    with open('/proc/uptime', 'r') as f:
        uptime_seconds = float(f.readline().split()[0])

    stats = {'date': datetime.datetime.now().isoformat(), 'name': name,
             'cpu': psutil.cpu_percent(interval=None, percpu=True), 'cpu_count': psutil.cpu_count(),
             'cpu_ctx_switches': psutil.cpu_stats().ctx_switches, 'cpu_interrupts': psutil.cpu_stats().interrupts,
             'ram': psutil.virtual_memory().percent,
             'ram-available': psutil.virtual_memory().available, 'ram-used': psutil.virtual_memory().used,
             'swap': psutil.swap_memory().percent, 'swap-total': psutil.swap_memory().total,
             'swap-used': psutil.swap_memory().used, 'disk_io_read': psutil.disk_io_counters().read_bytes,
             'disk_io_write': psutil.disk_io_counters().write_bytes, 'disk_total': psutil.disk_usage('/').total,
             'disk_used': psutil.disk_usage('/').used,
             'uptime': uptime_seconds}

    nic_list = psutil.net_io_counters(pernic=True)
    nic = nic_list[nic]

    stats['packets_sent'] = [nic.packets_sent, nic.errout]
    stats['packets_recv'] = [nic.packets_recv, nic.errin]
    stats['bytes_recv'] = nic.bytes_recv
    stats['bytes_sent'] = nic.bytes_sent

    if prev_recv != -1:
        stats['dl_rate'] = stats['bytes_recv'] - prev_recv
    else:
        stats['dl_rate'] = 0
    prev_recv = stats['bytes_recv']

    if prev_sent != -1:
        stats['ul_rate'] = stats['bytes_sent'] - prev_sent
    else:
        stats['ul_rate'] = 0
    prev_sent = stats['bytes_sent']

    if prev_read != -1:
        stats['disk_read_rate'] = stats['disk_io_read'] - prev_read
    else:
        stats['disk_read_rate'] = 0
    prev_read = stats['disk_io_read']

    if prev_write != -1:
        stats['disk_write_rate'] = stats['disk_io_write'] - prev_write
    else:
        stats['disk_write_rate'] = 0
    prev_write = stats['disk_io_write']
    return stats
Example #25
def send_stats(client):
    last_disk_io = psutil.disk_io_counters()
    last_net_io = psutil.net_io_counters()

    while True:
        memory = psutil.phymem_usage()
        disk = psutil.disk_usage("/")
        disk_io = psutil.disk_io_counters()
        disk_io_change = io_change(last_disk_io, disk_io)
        net_io = psutil.net_io_counters()
        net_io_change = io_change(last_net_io, net_io)
        last_disk_io = disk_io
        last_net_io = net_io

        gauges = {
            "memory.used": memory.used,
            "memory.free": memory.free,
            "memory.percent": memory.percent,
            "cpu.percent": psutil.cpu_percent(),
            "load": os.getloadavg()[0],
            "disk.size.used": disk.used,
            "disk.size.free": disk.free,
            "disk.size.percent": disk.percent,
            "disk.read.bytes": disk_io_change["read_bytes"],
            "disk.read.time": disk_io_change["read_time"],
            "disk.write.bytes": disk_io_change["write_bytes"],
            "disk.write.time": disk_io_change["write_time"],
            "net.in.bytes": net_io_change["bytes_recv"],
            "net.in.errors": net_io_change["errin"],
            "net.in.dropped": net_io_change["dropin"],
            "net.out.bytes": net_io_change["bytes_sent"],
            "net.out.errors": net_io_change["errout"],
            "net.out.dropped": net_io_change["dropout"],
        }

        thresholds = {
            "memory.percent": 80,
            "disk.size.percent": 90,
            "queue.pending": 20000,
            "load": 20,
        }

        for name, value in gauges.items():
            print(name, value)
            client.gauge(name, value)
            threshold = thresholds.get(name, None)
            if threshold is not None and value > threshold:
                bits = (threshold, name)
                message = "Threshold of %s reached for %s" % bits
                print(message)

        time.sleep(1)
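The io_change() helper is not shown here; presumably it returns per-field deltas between two counter snapshots, along these lines (a sketch under that assumption):

def io_change(last, current):
    # field-by-field difference between two psutil counter namedtuples
    return {field: getattr(current, field) - getattr(last, field)
            for field in current._fields}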
Example #26
 def __init__(self):
     self.nic='eth0'
     self.disk_part='/'
     self.pid='0'
     self.cpulcnt=ps.cpu_count(logical=True)
     self.cpupcnt=ps.cpu_count(logical=False)
     self.cpupercent=ps.cpu_times_percent(percpu=False)
     self.memtotal=ps.virtual_memory()
     self.diskconf=ps.disk_partitions()
     self.diskio=ps.disk_io_counters(perdisk=False)
     self.diskio2=ps.disk_io_counters(perdisk=True)
     self.swap=ps.swap_memory()
     self.netio_part=ps.net_io_counters(pernic=True)
Example #27
def get_diskio():
    times = 5
    read = 0
    write = 0
    for i in range(0,times):
        m_diskio = psutil.disk_io_counters(perdisk=False)
        time.sleep(1)
        m_diskio2 = psutil.disk_io_counters(perdisk=False)
        read = read + (m_diskio2.read_bytes - m_diskio.read_bytes)
        write = write + (m_diskio2.write_bytes - m_diskio.write_bytes)
    read = read/times
    write = write/times
    return read,write
Example #28
 def GetDiskIOInterval(self,interval=5):
     diskinfo = psutil.disk_io_counters()
     time.sleep(interval)
     diskinfo2 = psutil.disk_io_counters()
     
     rc = diskinfo2[0]-diskinfo[0]
     wc = diskinfo2[1]-diskinfo[1]
     rb = diskinfo2[2]-diskinfo[2]
     wb = diskinfo2[3]-diskinfo[3]
     
     sdisk = collections.namedtuple('intervaldiskio','read_count write_count read_bytes write_bytes')
     
     return sdisk(read_count=rc, write_count=wc, read_bytes=rb, write_bytes=wb)
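Usage sketch for the method above; the owner class name is hypothetical:

monitor = HostMonitor()                      # assumed class that defines GetDiskIOInterval
delta = monitor.GetDiskIOInterval(interval=5)
print(delta.read_bytes, delta.write_bytes)   # bytes transferred during the 5-second window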
Example #29
    def collect(self):
        """
        Collect disk IO stats.
        """
        disk_before = psutil.disk_io_counters()
        time.sleep(1)
        disk_after = psutil.disk_io_counters()

        gauge = statsd.Gauge('.'.join([self.cfg['default']['hostname'], self.path]), self.conn)
        gauge.send('read_count', math.fabs(disk_after.read_count - disk_before.read_count))
        gauge.send('write_count', math.fabs(disk_after.write_count - disk_before.write_count))
        gauge.send('read_bytes', math.fabs(disk_after.read_bytes - disk_before.read_bytes))
        gauge.send('write_bytes', math.fabs(disk_after.write_bytes - disk_before.write_bytes))
Example #30
 def get_io_info(self, get=None):
     io_disk = psutil.disk_io_counters()
     ioTotal = {}
     ioTotal['write'] = self.get_io_write(io_disk.write_bytes)
     ioTotal['read'] = self.get_io_read(io_disk.read_bytes)
     return ioTotal
Example #31
def diskinit(self):

    self.disklist = []
    self.disksize = []
    try:
        p = popen('lsblk -d | grep -e ^NAME -e disk')
        partitions = p.readlines()
        p.close()
        for parts in partitions:
            tempparts = parts.split()
            if 'NAME' not in tempparts[0] and 'zram' not in tempparts[0]:
                self.disklist.append(tempparts[0])
                self.disksize.append(tempparts[3])
                print(tempparts[0])
    except Exception as e:
        print(f"Failed to get Disks: {e}")

    self.diskWidgetList = {}
    self.diskstate1 = []
    self.diskActiveArray = []
    self.diskReadArray = []
    self.diskWriteArray = []
    self.numOfDisks = len(self.disklist)

    # partitions
    self.diskPartitions = {}
    self.diskListStores = {}
    self.diskListStoreItrs = {}
    partitions = ps.disk_partitions()

    for i in range(0, self.numOfDisks):
        self.diskWidgetList[i] = diskTabWidget()
        self.performanceStack.add_titled(self.diskWidgetList[i],
                                         f'page{self.stack_counter}',
                                         'Disk' + str(i))
        self.stack_counter += 1
        self.diskWidgetList[i].disktextlabel.set_text(self.disklist[i])
        self.diskWidgetList[i].diskinfolabel.set_text(self.disksize[i])
        disktemp = ps.disk_io_counters(perdisk=True)
        self.diskt1 = time()
        for drives in disktemp:
            if drives == self.disklist[i]:
                self.diskstate1.append(disktemp[drives])

        # partition info
        self.diskPartitions[i] = []
        for part in partitions:
            if self.disklist[i] in part[0]:
                self.diskPartitions[i] += [part]
        ## for treeview of disk usage
        self.diskListStores[i] = g.ListStore(str, str, str, str, str, int,
                                             bool)
        self.diskListStoreItrs[i] = []
        for part in self.diskPartitions[i]:
            temp = ps.disk_usage(part[1])
            itr = self.diskListStores[i].append([
                part[0], part[1], part[2],
                byte_to_human(temp[0], persec=False),
                byte_to_human(temp[1], persec=False), temp[3], False
            ])
            self.diskListStoreItrs[i].append(itr)

        self.diskWidgetList[i].diskUsagesTreeView.set_model(
            self.diskListStores[i])

        for k, col in enumerate(
            ['Device', 'MountPoint', 'Type', 'Total', 'Used']):
            renderer = g.CellRendererText()
            if col == 'Used':
                column = g.TreeViewColumn(col)
                progRenderer = g.CellRendererProgress()
                # progRenderer.props.text='50%'
                # progRenderer.props.fraction=0.5
                column.pack_start(renderer, False)
                column.add_attribute(renderer, "text", 4)
                column.pack_start(progRenderer, False)
                column.add_attribute(progRenderer, "value", 5)
                # column=g.TreeViewColumn(col,progRenderer,value=5,inverted=6)

            else:
                column = g.TreeViewColumn(col, renderer, text=k)

            column.set_sort_column_id(k)
            column.set_resizable(True)
            column.set_reorderable(True)
            # column.set_expand(True)
            column.set_alignment(0)
            column.set_sort_indicator(True)
            self.diskWidgetList[i].diskUsagesTreeView.append_column(column)

            # self.processTreeStore.set_sort_func(i,sorting_func,None)
        self.diskListStores[i].set_sort_func(3, sorting_func, None)

        self.diskActiveArray.append([0] * 100)
        self.diskReadArray.append([0] * 100)
        self.diskWriteArray.append([0] * 100)

        self.diskWidgetList[i].givedata(self, i)
Example #32
                        netstat['tx'] = (self.network_new[net].bytes_sent - self.network_old[net].bytes_sent)
                    except Exception, e:
                        # print(e)
                        continue
                    else:
                        self.network.append(netstat)
                self.network_old = self.network_new

        # disk io
        if myglobal.get_ps_disk_io_tag():
            self.diskio = []
            try:
                self.diskio_old
            except Exception:
                if myglobal.get_ps_disk_io_tag():
                    self.diskio_old = ps.disk_io_counters(True)
            else:
                try:
                    self.diskio_new = ps.disk_io_counters(True)
                except Exception, e:
                    # print(e)
                    pass
                else:
                    for disk in self.diskio_new:
                        try:
                            diskstat = {}
                            diskstat['disk_name'] = disk
                            diskstat['read_bytes'] = (self.diskio_new[disk].read_bytes - self.diskio_old[disk].read_bytes)
                            diskstat['write_bytes'] = (self.diskio_new[disk].write_bytes - self.diskio_old[disk].write_bytes)
                        except Exception, e:
                            # print(e)
Example #33
def systemTask():
    try:
        import psutil, time
        filename = 'data/control.conf'
        sql = db.Sql().dbfile('system')
        csql = '''CREATE TABLE IF NOT EXISTS `load_average` (
  `id` INTEGER PRIMARY KEY AUTOINCREMENT,
  `pro` REAL,
  `one` REAL,
  `five` REAL,
  `fifteen` REAL,
  `addtime` INTEGER
)'''
        sql.execute(csql, ())
        cpuIo = cpu = {}
        cpuCount = psutil.cpu_count()
        used = count = 0
        reloadNum = 0
        network_up = network_down = diskio_1 = diskio_2 = networkInfo = cpuInfo = diskInfo = None
        while True:
            if not os.path.exists(filename):
                time.sleep(10)
                continue

            day = 30
            try:
                day = int(public.readFile(filename))
                if day < 1:
                    time.sleep(10)
                    continue
            except:
                day = 30

            tmp = {}
            # sample current CPU usage
            tmp['used'] = psutil.cpu_percent(interval=1)

            if not cpuInfo:
                tmp['mem'] = GetMemUsed()
                cpuInfo = tmp

            if cpuInfo['used'] < tmp['used']:
                tmp['mem'] = GetMemUsed()
                cpuInfo = tmp

            # sample current network I/O
            networkIo = psutil.net_io_counters()[:4]
            if not network_up:
                network_up = networkIo[0]
                network_down = networkIo[1]
            tmp = {}
            tmp['upTotal'] = networkIo[0]
            tmp['downTotal'] = networkIo[1]
            tmp['up'] = round(float((networkIo[0] - network_up) / 1024), 2)
            tmp['down'] = round(float((networkIo[1] - network_down) / 1024), 2)
            tmp['downPackets'] = networkIo[3]
            tmp['upPackets'] = networkIo[2]

            network_up = networkIo[0]
            network_down = networkIo[1]

            if not networkInfo: networkInfo = tmp
            if (tmp['up'] + tmp['down']) > (networkInfo['up'] +
                                            networkInfo['down']):
                networkInfo = tmp

            # sample disk I/O
            disk_ios = True
            try:
                if os.path.exists('/proc/diskstats'):
                    diskio_2 = psutil.disk_io_counters()
                    if not diskio_1: diskio_1 = diskio_2
                    tmp = {}
                    tmp['read_count'] = diskio_2.read_count - diskio_1.read_count
                    tmp['write_count'] = diskio_2.write_count - diskio_1.write_count
                    tmp['read_bytes'] = diskio_2.read_bytes - diskio_1.read_bytes
                    tmp['write_bytes'] = diskio_2.write_bytes - diskio_1.write_bytes
                    tmp['read_time'] = diskio_2.read_time - diskio_1.read_time
                    tmp['write_time'] = diskio_2.write_time - diskio_1.write_time

                    if not diskInfo:
                        diskInfo = tmp
                    else:
                        diskInfo['read_count'] += tmp['read_count']
                        diskInfo['write_count'] += tmp['write_count']
                        diskInfo['read_bytes'] += tmp['read_bytes']
                        diskInfo['write_bytes'] += tmp['write_bytes']
                        diskInfo['read_time'] += tmp['read_time']
                        diskInfo['write_time'] += tmp['write_time']

                    diskio_1 = diskio_2
            except:
                disk_ios = False

            #print diskInfo

            if count >= 12:
                try:
                    addtime = int(time.time())
                    deltime = addtime - (day * 86400)

                    data = (cpuInfo['used'], cpuInfo['mem'], addtime)
                    sql.table('cpuio').add('pro,mem,addtime', data)
                    sql.table('cpuio').where("addtime<?",
                                             (deltime, )).delete()

                    data = (networkInfo['up'] / 5, networkInfo['down'] / 5,
                            networkInfo['upTotal'], networkInfo['downTotal'],
                            networkInfo['downPackets'],
                            networkInfo['upPackets'], addtime)
                    sql.table('network').add(
                        'up,down,total_up,total_down,down_packets,up_packets,addtime',
                        data)
                    sql.table('network').where("addtime<?",
                                               (deltime, )).delete()
                    if os.path.exists('/proc/diskstats') and disk_ios:
                        data = (diskInfo['read_count'],
                                diskInfo['write_count'],
                                diskInfo['read_bytes'],
                                diskInfo['write_bytes'], diskInfo['read_time'],
                                diskInfo['write_time'], addtime)
                        sql.table('diskio').add(
                            'read_count,write_count,read_bytes,write_bytes,read_time,write_time,addtime',
                            data)
                        sql.table('diskio').where("addtime<?",
                                                  (deltime, )).delete()

                    #LoadAverage
                    load_average = GetLoadAverage()
                    lpro = round(
                        (load_average['one'] / load_average['max']) * 100, 2)
                    if lpro > 100: lpro = 100
                    sql.table('load_average').add(
                        'pro,one,five,fifteen,addtime',
                        (lpro, load_average['one'], load_average['five'],
                         load_average['fifteen'], addtime))

                    lpro = None
                    load_average = None
                    cpuInfo = None
                    networkInfo = None
                    diskInfo = None
                    count = 0
                    reloadNum += 1
                    if reloadNum > 1440:
                        reloadNum = 0
                except Exception as ex:
                    print(str(ex))
            del (tmp)

            time.sleep(5)
            count += 1
    except:
        time.sleep(30)
        systemTask()
Example #34
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('8.8.8.8', 80))
    NAThost = s.getsockname()[0]
    # e.g. '172.24.15.90', the private LAN IP
except:
    pass
finally:
    s.close()
ResTask = writeResTask()
visitDay = visitDay

diskTtotal = psutil.disk_partitions()
for i in diskTtotal:
    try:
        o = psutil.disk_usage(i.device)
        ioo = psutil.disk_io_counters()
        print(ioo)
    except Exception as e:
        pass
    NADiskTotal = int(o.total / (1024.0 * 1024.0 * 1024.0))
    NADiskUsed = int(o.used / (1024.0 * 1024.0 * 1024.0))
    NADiskFree = int(o.free / (1024.0 * 1024.0 * 1024.0))


@app.route('/ControlPanel', methods=['POST', 'GET'])
@cklogin()
def ControlPanel():
    if request.method == 'GET':
        return render_template(
            'ControlPanel.html',
            inv=ResTask.inv,  # 间隔x秒
Example #35
class TestWrapNumbers(unittest.TestCase):
    def setUp(self):
        wrap_numbers.cache_clear()

    tearDown = setUp

    def test_first_call(self):
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_input_hasnt_changed(self):
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_increase_but_no_wrap(self):
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(10, 15, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(20, 25, 30)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(20, 25, 30)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_wrap(self):
        # let's say 100 is the threshold
        input = {'disk1': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # first wrap restarts from 10
        input = {'disk1': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 110)})
        # then it remains the same
        input = {'disk1': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 110)})
        # then it goes up
        input = {'disk1': nt(100, 100, 90)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 190)})
        # then it wraps again
        input = {'disk1': nt(100, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 210)})
        # and remains the same
        input = {'disk1': nt(100, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(100, 100, 210)})
        # now wrap another num
        input = {'disk1': nt(50, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(150, 100, 210)})
        # and again
        input = {'disk1': nt(40, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(190, 100, 210)})
        # keep it the same
        input = {'disk1': nt(40, 100, 20)}
        self.assertEqual(wrap_numbers(input, 'disk_io'),
                         {'disk1': nt(190, 100, 210)})

    def test_changing_keys(self):
        # Emulate a case where the second call to disk_io()
        # (or whatever) provides a new disk, then the new disk
        # disappears on the third call.
        input = {'disk1': nt(5, 5, 5)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(5, 5, 5), 'disk2': nt(7, 7, 7)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        input = {'disk1': nt(8, 8, 8)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

    def test_changing_keys_w_wrap(self):
        input = {'disk1': nt(50, 50, 50), 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # disk 2 wraps
        input = {'disk1': nt(50, 50, 50), 'disk2': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), {
            'disk1': nt(50, 50, 50),
            'disk2': nt(100, 100, 110)
        })
        # disk 2 disappears
        input = {'disk1': nt(50, 50, 50)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)

        # then it appears again; the old wrap is supposed to be
        # gone.
        input = {'disk1': nt(50, 50, 50), 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # remains the same
        input = {'disk1': nt(50, 50, 50), 'disk2': nt(100, 100, 100)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), input)
        # and then wraps again
        input = {'disk1': nt(50, 50, 50), 'disk2': nt(100, 100, 10)}
        self.assertEqual(wrap_numbers(input, 'disk_io'), {
            'disk1': nt(50, 50, 50),
            'disk2': nt(100, 100, 110)
        })

    def test_real_data(self):
        d = {
            'nvme0n1': (300, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
            'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
            'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
            'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)
        }
        self.assertEqual(wrap_numbers(d, 'disk_io'), d)
        self.assertEqual(wrap_numbers(d, 'disk_io'), d)
        # decrease this   ↓
        d = {
            'nvme0n1': (100, 508, 640, 1571, 5970, 1987, 2049, 451751, 47048),
            'nvme0n1p1': (1171, 2, 5600256, 1024, 516, 0, 0, 0, 8),
            'nvme0n1p2': (54, 54, 2396160, 5165056, 4, 24, 30, 1207, 28),
            'nvme0n1p3': (2389, 4539, 5154, 150, 4828, 1844, 2019, 398, 348)
        }
        out = wrap_numbers(d, 'disk_io')
        self.assertEqual(out['nvme0n1'][0], 400)

    # --- cache tests

    def test_cache_first_call(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(cache[1], {'disk_io': {}})
        self.assertEqual(cache[2], {'disk_io': {}})

    def test_cache_call_twice(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        input = {'disk1': nt(10, 10, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {
                ('disk1', 0): 0,
                ('disk1', 1): 0,
                ('disk1', 2): 0
            }})
        self.assertEqual(cache[2], {'disk_io': {}})

    def test_cache_wrap(self):
        # let's say 100 is the threshold
        input = {'disk1': nt(100, 100, 100)}
        wrap_numbers(input, 'disk_io')

        # first wrap restarts from 10
        input = {'disk1': nt(100, 100, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {
                ('disk1', 0): 0,
                ('disk1', 1): 0,
                ('disk1', 2): 100
            }})
        self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})

        def assert_():
            cache = wrap_numbers.cache_info()
            self.assertEqual(cache[1], {
                'disk_io': {
                    ('disk1', 0): 0,
                    ('disk1', 1): 0,
                    ('disk1', 2): 100
                }
            })
            self.assertEqual(cache[2],
                             {'disk_io': {
                                 'disk1': set([('disk1', 2)])
                             }})

        # then it remains the same
        input = {'disk1': nt(100, 100, 10)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        assert_()

        # then it goes up
        input = {'disk1': nt(100, 100, 90)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        assert_()

        # then it wraps again
        input = {'disk1': nt(100, 100, 20)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {
                ('disk1', 0): 0,
                ('disk1', 1): 0,
                ('disk1', 2): 190
            }})
        self.assertEqual(cache[2], {'disk_io': {'disk1': set([('disk1', 2)])}})

    def test_cache_changing_keys(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        input = {'disk1': nt(5, 5, 5), 'disk2': nt(7, 7, 7)}
        wrap_numbers(input, 'disk_io')
        cache = wrap_numbers.cache_info()
        self.assertEqual(cache[0], {'disk_io': input})
        self.assertEqual(
            cache[1],
            {'disk_io': {
                ('disk1', 0): 0,
                ('disk1', 1): 0,
                ('disk1', 2): 0
            }})
        self.assertEqual(cache[2], {'disk_io': {}})

    def test_cache_clear(self):
        input = {'disk1': nt(5, 5, 5)}
        wrap_numbers(input, 'disk_io')
        wrap_numbers(input, 'disk_io')
        wrap_numbers.cache_clear('disk_io')
        self.assertEqual(wrap_numbers.cache_info(), ({}, {}, {}))
        wrap_numbers.cache_clear('disk_io')
        wrap_numbers.cache_clear('?!?')

    @unittest.skipIf(not psutil.disk_io_counters()
                     or not psutil.net_io_counters(),
                     "no disks or NICs available")
    def test_cache_clear_public_apis(self):
        psutil.disk_io_counters()
        psutil.net_io_counters()
        caches = wrap_numbers.cache_info()
        for cache in caches:
            self.assertIn('psutil.disk_io_counters', cache)
            self.assertIn('psutil.net_io_counters', cache)

        psutil.disk_io_counters.cache_clear()
        caches = wrap_numbers.cache_info()
        for cache in caches:
            self.assertIn('psutil.net_io_counters', cache)
            self.assertNotIn('psutil.disk_io_counters', cache)

        psutil.net_io_counters.cache_clear()
        caches = wrap_numbers.cache_info()
        self.assertEqual(caches, ({}, {}, {}))
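For context, these tests exercise psutil's internal wrap_numbers() helper, which keeps counters monotonic when the kernel's numbers overflow and restart from a small value. The core idea, in a simplified sketch (not psutil's actual implementation), is:

def unwrap(raw_values):
    # keep a running base that absorbs the value seen just before each wrap,
    # so the reported counter never goes backwards
    base, prev, out = 0, None, []
    for raw in raw_values:
        if prev is not None and raw < prev:   # counter wrapped / restarted
            base += prev
        out.append(base + raw)
        prev = raw
    return out

assert unwrap([100, 10, 10, 90, 20, 20]) == [100, 110, 110, 190, 210, 210]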
Example #36
def action():
    # .config(state=DISABLED)
    checkPlatform()
    print("-" * 40, "Sys Info", "-" * 40)
    uname = platform.uname()
    print(f"System:                                 {uname.system}")
    print(f"Node name:                              {uname.node}")
    print(f"Release:                                {uname.release}")
    print(f"Version:                                {uname.version}")
    print(f"Machine:                                {uname.machine}")
    print(f"Processor:                              {uname.processor}")

    # BOOT TIME
    print("-" * 40, "Boot time", "-" * 39)
    boot_time_timestamp = psutil.boot_time()
    bt = datetime.fromtimestamp(boot_time_timestamp)
    print(f"Boot time:                              {bt.day}.{bt.month}.{bt.year} {bt.hour}:{bt.minute}.{bt.second}")

    # CPU INFO
    print("-" * 40, "CPU info", "-" * 40)
    print("Actual Cores:                          ", psutil.cpu_count(logical=False))
    print("Logical Cores:                         ", psutil.cpu_count(logical=True))
    print(f"Max Frequency:                          {psutil.cpu_freq().current:.1f}Mhz")
    print(f"Current Frequency:                      {psutil.cpu_freq().current:.1f}Mhz")
    print(f"CPU usage:                              {psutil.cpu_percent()}%")
    print("CPU usage/core:")
    for i, perc in enumerate(psutil.cpu_percent(percpu=True, interval=1)):
        print(f"\tCore {i}:                         {perc}%")

    def adjust_size(size):
        factor = 1024
        for i2 in ["B", "KB", "MB", "GB", "TB"]:
            if size > factor:
                size = size / factor
            else:
                return f"{size:.3f}{i2}"

    # RAM INFO
    print("-" * 40, "RAM info", "-" * 40)
    virtual_mem = psutil.virtual_memory()
    print(f"Total:                                  {adjust_size(virtual_mem.total)}")
    print(f"Available:                              {adjust_size(virtual_mem.available)}")
    print(f"Used:                                   {adjust_size(virtual_mem.used)}")
    print(f"Percentage:                             {virtual_mem.percent}%")
    print("-" * 20, "SWAP", "-" * 20)
    swap = psutil.swap_memory()
    print(f"Total:                                  {adjust_size(swap.total)}")
    print(f"Free:                                   {adjust_size(swap.free)}")
    print(f"Used:                                   {adjust_size(swap.used)}")
    print(f"Percentage:                             {swap.percent}%")

    # DISK INFO
    print("-" * 40, "Disk info", "-" * 39)
    partitions = psutil.disk_partitions()
    for p in partitions:
        print(f"Device:                                 {p.device}")
        print(f"\tMountpoint:                     {p.mountpoint}")
        print(f"\tFile system type:               {p.fstype}")
        try:
            partitions_usage = psutil.disk_usage(p.mountpoint)
        except PermissionError:
            continue
        print(f"Total size:                             {adjust_size(partitions_usage.total)}")
        print(f"Used:                                   {adjust_size(partitions_usage.used)}")
        print(f"Free:                                   {adjust_size(partitions_usage.free)}")
        print(f"Percentage:                             {partitions_usage.percent}%")
    disk_io = psutil.disk_io_counters()
    print(f"Read since boot:                        {adjust_size(disk_io.read_bytes)}")
    print(f"Written since boot:                     {adjust_size(disk_io.write_bytes)}")

    # GPU INFO
    print("-" * 40, "GPU info", "-" * 40)
    gpus = GPUtil.getGPUs()
    for gpu in gpus:
        print(f"ID:                                     {gpu.id}, Name: {gpu.name}")
        print(f"\tLoad:                           {gpu.load * 100}%")
        print(f"\tFree Mem:                       {gpu.memoryFree}MB")
        print(f"\tUsed Mem:                       {gpu.memoryUsed}MB")
        print(f"\tTotal Mem:                      {gpu.memoryTotal}MB")
        print(f"\tTemperature:                    {gpu.temperature} °C")

    # NETWORK INFO
    print("-" * 40, "Network info", "-" * 36)
    if_addrs = psutil.net_if_addrs()
    for interface_name, interface_addresses in if_addrs.items():
        for address in interface_addresses:
            print(f"Interface:                              {interface_name}")
            if str(address.family) == 'AddressFamily.AF_INET':
                print(f"\tIP Address:                     {address.address}")
                print(f"\tNetmask:                        {address.netmask}")
                print(f"\tBroadcast IP:                   {address.broadcast}")
            elif str(address.family) == 'AddressFamily.AF_PACKET':
                print(f"\tMAC Address:              {address.address}")
                print(f"\tNetmask:                      {address.netmask}")
                print(f"\tBroadcast MAC:            {address.broadcast}")
    net_io = psutil.net_io_counters()
    print(f"Total Bytes send:                       {adjust_size(net_io.bytes_sent)}")
    print(f"Total Bytes received:                   {adjust_size(net_io.bytes_recv)}")
    print(f"Total Packets send:                     {adjust_size(net_io.packets_sent)}")
    print(f"Total Packts received:                  {adjust_size(net_io.packets_recv)}")
    print(f"Total errors while receiving:           {net_io.errin}")
    print(f"Total errors while sending:             {net_io.errout}")
    print(f"Incoming packets which were dropped:    {net_io.dropin}")
    print(f"Outgoing packets which were dropped:    {net_io.dropout}")

    # BATTERY INFO
    print("-" * 40, "Battery info", "-" * 36)
    print(f"Sensor battery:")
    print(f"\t{psutil.sensors_battery()}")
    print()
Example #37
import psutil
import time

memInfo = psutil.virtual_memory()
print(memInfo.total)  # total physical memory, in bytes
print('{:.0f}'.format(memInfo.total / 1024 / 1024))
print('{:.0f}'.format(memInfo.free / 1024 / 1024))
print('{:.0f}'.format(memInfo.used / 1024 / 1024))
print('{:.0f}'.format(memInfo.buffers / 1024 / 1024))
print('{:.0f}'.format(memInfo.cached / 1024 / 1024))

swapInfo = psutil.swap_memory()
print(swapInfo)

diskInfo = psutil.disk_partitions()
print(diskInfo)
diskInfo = psutil.disk_io_counters()
print(diskInfo)
diskInfo = psutil.disk_usage('/')
print(diskInfo.total, diskInfo.free, diskInfo.used)

# Build the CPU/Memory/Disk info template
monitorInfo = [{
    'date': {
        'cpu': {
            'user': None,
            'system': None,
            'iowait': None,
            'idle': None
        },
        'memory': {
            'total': None,
Example #38
def disk_usage(param=None, directory=None):
    if param == "basic":
        return psutil.disk_usage(directory)
    else:
        return psutil.disk_io_counters()
Example #39
    async def iotop(self, ctx):
        """Snapshot of I/O usage information output by the kernel"""

        if not hasattr(psutil.Process, "oneshot"):
            await self.bot.say("Platform not supported")
            return

        # first get a list of all processes and disk io counters
        procs = [p for p in psutil.process_iter()]
        for p in procs[:]:
            try:
                p._before = p.io_counters()
            except psutil.Error:
                procs.remove(p)
                continue
        disks_before = psutil.disk_io_counters()

        # sleep some time
        await asyncio.sleep(1)

        # then retrieve the same info again
        for p in procs[:]:
            with p.oneshot():
                try:
                    p._after = p.io_counters()
                    p._cmdline = ' '.join(p.cmdline())
                    if not p._cmdline:
                        p._cmdline = p.name()
                    p._username = p.username()
                except (psutil.NoSuchProcess, psutil.ZombieProcess,
                        psutil.AccessDenied):
                    procs.remove(p)
        disks_after = psutil.disk_io_counters()

        # finally calculate results by comparing data before and
        # after the interval
        for p in procs:
            p._read_per_sec = p._after.read_bytes - p._before.read_bytes
            p._write_per_sec = p._after.write_bytes - p._before.write_bytes
            p._total = p._read_per_sec + p._write_per_sec

        disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes
        disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes

        # sort processes by total disk IO so that the more intensive
        # ones get listed first
        processes = sorted(procs, key=lambda p: p._total, reverse=True)

        # print results
        template = "{0:<5} {1:<7} {2:11} {3:11} {4}\n"

        msg = "Total DISK READ: {0} | Total DISK WRITE: {1}\n".format(
            self._size(disks_read_per_sec), self._size(disks_write_per_sec))

        msg += template.format("PID", "USER", "DISK READ", "DISK WRITE",
                               "COMMAND")

        for p in processes:
            msg += template.format(p.pid, p._username[:7],
                                   self._size(p._read_per_sec),
                                   self._size(p._write_per_sec), p._cmdline)
        await self._say(ctx, msg)
        return
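
    # Hedged sketch: _size() (and _say()) are helpers defined elsewhere in this
    # cog. _size presumably renders a byte count as a short string, roughly:
    def _size(self, num):
        """Assumed helper, not the original: format bytes like '4.2 M'."""
        for unit in ("B", "K", "M", "G", "T"):
            if abs(num) < 1024.0:
                return "%3.1f %s" % (num, unit)
            num /= 1024.0
        return "%.1f P" % num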
Example #40
0
    def test_disk_io_counters(self):
        # Duplicate of test_system.py. Keep it anyway.
        for k, v in psutil.disk_io_counters(perdisk=True).items():
            self.assertIsInstance(k, str)
            self.assert_ntuple_of_nums(v, type_=(int, long))
Example #41
0
async def info(request):
    """HTTP Method to retun node state to caller"""
    log.debug("info request")
    app = request.app
    answer = {}
    # copy relevant entries from state dictionary to response
    node = {}
    node['id'] = request.app['id']
    node['type'] = request.app['node_type']
    node['start_time'] = app["start_time"]  # unixTimeToUTC(app['start_time'])
    node['state'] = app['node_state']
    node['node_number'] = app['node_number']
    node['node_count'] = app['node_count']

    answer["node"] = node
    # psutil info
    # see: http://pythonhosted.org/psutil/ for description of different fields
    cpu = {}
    cpu["percent"] = psutil.cpu_percent()
    cpu["cores"] = psutil.cpu_count()
    answer["cpu"] = cpu
    diskio = psutil.disk_io_counters()
    disk_stats = {}
    disk_stats["read_count"] = diskio.read_count
    disk_stats["read_time"] = diskio.read_time
    disk_stats["read_bytes"] = diskio.read_bytes
    disk_stats["write_count"] = diskio.write_count
    disk_stats["write_time"] = diskio.write_time
    disk_stats["write_bytes"] = diskio.write_bytes
    answer["diskio"] = disk_stats
    netio = psutil.net_io_counters()
    net_stats = {}
    net_stats["bytes_sent"] = netio.bytes_sent
    net_stats["bytes_sent"] = netio.bytes_recv
    net_stats["packets_sent"] = netio.packets_sent
    net_stats["packets_recv"] = netio.packets_recv
    net_stats["errin"] = netio.errin
    net_stats["errout"] = netio.errout
    net_stats["dropin"] = netio.dropin
    net_stats["dropout"] = netio.dropout
    answer["netio"] = net_stats
    mem_stats = {}
    svmem = psutil.virtual_memory()
    mem_stats["phys_total"] = svmem.total
    mem_stats["phys_available"] = svmem.available
    sswap = psutil.swap_memory()
    mem_stats["swap_total"] = sswap.total
    mem_stats["swap_used"] = sswap.used
    mem_stats["swap_free"] = sswap.free
    mem_stats["percent"] = sswap.percent
    answer["memory"] = mem_stats
    disk_stats = {}
    sdiskusage = psutil.disk_usage('/')
    disk_stats["total"] = sdiskusage.total
    disk_stats["used"] = sdiskusage.used
    disk_stats["free"] = sdiskusage.free
    disk_stats["percent"] = sdiskusage.percent
    answer["disk"] = disk_stats
    answer["log_stats"] = app["log_count"]
    answer["req_count"] = app["req_count"]
    if "s3_stats" in app:
        answer["s3_stats"] = app["s3_stats"]
    elif "azure_stats" in app:
        answer["azure_stats"] = app["azure_stats"]
    mc_stats = {}
    if "meta_cache" in app:
        mc = app["meta_cache"]  # only DN nodes have this
        mc_stats["count"] = len(mc)
        mc_stats["dirty_count"] = mc.dirtyCount
        mc_stats["utililization_per"] = mc.cacheUtilizationPercent
        mc_stats["mem_used"] = mc.memUsed
        mc_stats["mem_target"] = mc.memTarget
    answer["meta_cache_stats"] = mc_stats
    cc_stats = {}
    if "chunk_cache" in app:
        cc = app["chunk_cache"]  # only DN nodes have this
        cc_stats["count"] = len(cc)
        cc_stats["dirty_count"] = cc.dirtyCount
        cc_stats["utililization_per"] = cc.cacheUtilizationPercent
        cc_stats["mem_used"] = cc.memUsed
        cc_stats["mem_target"] = cc.memTarget
    answer["chunk_cache_stats"] = cc_stats
    dc_stats = {}
    if "domain_cache" in app:
        dc = app["domain_cache"]  # only DN nodes have this
        dc_stats["count"] = len(dc)
        dc_stats["dirty_count"] = dc.dirtyCount
        dc_stats["utililization_per"] = dc.cacheUtilizationPercent
        dc_stats["mem_used"] = dc.memUsed
        dc_stats["mem_target"] = dc.memTarget
    answer["domain_cache_stats"] = dc_stats

    resp = await jsonResponse(request, answer)
    log.response(request, resp=resp)
    return resp
Example #42
0
def procInit(self):
    # self.processTree=self.builder.get_object('processtree')
    
    self.processTree=g.TreeView()
    self.process_tab_box.add(self.processTree)
    self.process_tab_box.show_all()
    self.processTree_background=self.builder.get_object('processtreeBackground')
    self.process_kill_button=self.builder.get_object('processKillButton')
    self.process_kill_button.connect('clicked',self.kill_process)

    # self.data=[['chrome',30,50,0,1],['firefox',10,20,0,2],['sysmon',1,0,3,1]]
    #                                 0   1   2   3   4   5   6   7   8   9   10   11  12  13       14
    self.processTreeStore=g.TreeStore(int,str,str,str,str,str,str,str,str,str,str,str,str,str,GdkPixbuf.Pixbuf)
    # self.processTreeStore=self.builder.get_object('processTreeStore')

    # for data in self.data:
    #     self.processTreeStore.append(None,data)

    # self.processTreeStore.set_sort_func(4,sorting_func,None)

    pids=ps.pids()

    # self.di={}
    self.procDiskprev={}
    self.processList={}
    self.processTreeIterList={}
    self.processChildList={}
    self.columnList={}
    self.procT1={}

    ### total disk io counter calculations are done in proc.py
    self.diskTotalT1=0
    diskio=ps.disk_io_counters()
    self.diskTotalState1=[diskio[2],diskio[3]]

    self.systemdId=[]
    self.processSystemd=[]
    for pi in pids:
        procs=ps.Process(pi)

        if(procs.username()!='root'):
            if procs.name()=='systemd':
                self.systemdId.append(pi)
                self.processSystemd.append(procs)
                searcher(self,procs,None)          ## for multiple user view
                # break

    # self.processSystemd=ps.Process(self.systemdId)
    # searcher(self,self.processSystemd,None)
    
    self.processTree.set_model(self.processTreeStore)
    #                          0    1       2      3        4       5           6       7           8               9           10              11      12       13       
    self.column_header_list=['pid','Name','rCPU','CPU','rMemory','Memory','rDiskRead','DiskRead','rDiskWrite','DiskWrite','Resident\nMemory','Shared','Owner','Command']

    self.column_select_popover_check_buttons={}
    self.column_header_labels=[]
    self.column_select_popover=g.Menu()

    for i,col in enumerate(self.column_header_list):
        renderer=g.CellRendererText()
        if col=='Command':
            renderer.props.wrap_width=-1
        if col=='Name':
            icon_renderer=g.CellRendererPixbuf()
            column=g.TreeViewColumn(col)
            column.pack_start(icon_renderer,False)
            column.add_attribute(icon_renderer,'pixbuf',14)
            column.pack_start(renderer,False)
            column.add_attribute(renderer,'text',1)
            
        else:
            column=g.TreeViewColumn(col,renderer,text=i)
        
        ## forcing the column header to have the widget to get the button
        label = g.Label(col)
        label.show()
        self.column_header_labels.append(label)
        column.set_widget(label)

        widget = column.get_widget()
        while not isinstance(widget, g.Button):
            widget = widget.get_parent()
        widget.connect('button-press-event',self.column_button_press)

        column.set_sort_column_id(i)
        column.set_resizable(True)
        column.set_reorderable(True)
        column.set_expand(True)
        column.set_alignment(0)
        column.set_sort_indicator(True)
        
        self.processTree.append_column(column)
        self.columnList[i]=column

        popover_check_button=g.CheckMenuItem(label=col)
        popover_check_button.set_name(str(i))
        popover_check_button.connect('toggled',self.column_header_selection)
        popover_check_button.set_active(True)
        self.column_select_popover.append(popover_check_button)
        self.column_select_popover_check_buttons[i]=popover_check_button

        if i!=1:   
            self.processTreeStore.set_sort_func(i,sorting_func,None)

    self.column_select_popover.show_all()

    selected_row=self.processTree.get_selection()
    selected_row.connect("changed",self.row_selected)
    
    # self.processTree.connect('button-press-event',self.column_button_press)
    # self.processTree.connect(popover=self.column_select_popover)

    self.column_select_popover_check_buttons[6].set_active(False)
    self.column_select_popover_check_buttons[8].set_active(False)
    self.column_select_popover_check_buttons[10].set_active(False)
    self.column_select_popover_check_buttons[11].set_active(False)
Example #43
0
def update_stats():

    req_time = int(time.time())

    disk_write_data_start = psutil.disk_io_counters(perdisk=False)
    io_data_start = psutil.net_io_counters()

    # Some metrics are only reported as totals since boot,
    # so sample over a period (in seconds) to get a rate.

    time.sleep(period)

    cpu_data = psutil.cpu_percent(interval=None)
    ram_data = psutil.virtual_memory()

    # contains all disk data (with total size >= 1GB)
    disks_data = list(filter(lambda y: y[1][0] >= 1000000000, [(x.mountpoint, psutil.disk_usage(x.mountpoint)) for x in psutil.disk_partitions()]))

    disks_idx = (req_time // 6) % len(disks_data)

    # contains "currently displayed" disk data (cycles based on time)
    disk_mount = disks_data[disks_idx][0]
    disk_data = disks_data[disks_idx][1]

    disk_write_data = psutil.disk_io_counters(perdisk=False)
    io_data = psutil.net_io_counters()

    data = {
        'cpu': {
            'percent': cpu_data
        },
        'ram': {
            'percent': ram_data[2],
            'total': ram_data[0],
            'used': ram_data[3]
        },
        'disks': [], # todo: work in progress
        'disk': {
            'mount': disk_mount,
            'total': disk_data[0],
            'used': disk_data[1],
            'percent': disk_data[3]
        },
        'disk_io': {
            'read_bytes_sec': (disk_write_data[2] - disk_write_data_start[2])
                              / period,
            'write_bytes_sec': (disk_write_data[3] - disk_write_data_start[3])
                               / period
        },
        'net_io': {
            'sent_bytes_sec': (io_data[0] - io_data_start[0]) / period,
            'received_bytes_sec': (io_data[1] - io_data_start[1]) / period
        }
    }
    for disk_data in disks_data:
      data['disks'].append({'mount': disk_data[0],
                            'total': disk_data[1][0],
                            'used': disk_data[1][1],
                            'percent': disk_data[1][3]})

    return json.dumps(data)
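
# Hedged rewrite of the two rate calculations above using psutil's named
# fields instead of tuple indices (io_rates and its arguments are an
# illustration, not part of the original code):
def io_rates(disk_before, disk_after, net_before, net_after, period):
    """Per-second disk and network rates from two counter snapshots."""
    return {
        'read_bytes_sec': (disk_after.read_bytes - disk_before.read_bytes) / period,
        'write_bytes_sec': (disk_after.write_bytes - disk_before.write_bytes) / period,
        'sent_bytes_sec': (net_after.bytes_sent - net_before.bytes_sent) / period,
        'received_bytes_sec': (net_after.bytes_recv - net_before.bytes_recv) / period,
    }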
Example #44
0
class TestSystemAPIs(unittest.TestCase):
    """Tests for system-related APIs."""
    def setUp(self):
        safe_rmpath(TESTFN)

    def tearDown(self):
        reap_children()

    def test_process_iter(self):
        self.assertIn(os.getpid(), [x.pid for x in psutil.process_iter()])
        sproc = get_test_subprocess()
        self.assertIn(sproc.pid, [x.pid for x in psutil.process_iter()])
        p = psutil.Process(sproc.pid)
        p.kill()
        p.wait()
        self.assertNotIn(sproc.pid, [x.pid for x in psutil.process_iter()])

        with mock.patch('psutil.Process',
                        side_effect=psutil.NoSuchProcess(os.getpid())):
            self.assertEqual(list(psutil.process_iter()), [])
        with mock.patch('psutil.Process',
                        side_effect=psutil.AccessDenied(os.getpid())):
            with self.assertRaises(psutil.AccessDenied):
                list(psutil.process_iter())

    def test_process_iter_w_params(self):
        for p in psutil.process_iter(attrs=['pid']):
            self.assertEqual(list(p.info.keys()), ['pid'])
        with self.assertRaises(ValueError):
            list(psutil.process_iter(attrs=['foo']))
        with mock.patch("psutil._psplatform.Process.cpu_times",
                        side_effect=psutil.AccessDenied(0, "")) as m:
            for p in psutil.process_iter(attrs=["pid", "cpu_times"]):
                self.assertIsNone(p.info['cpu_times'])
                self.assertGreaterEqual(p.info['pid'], 0)
            assert m.called
        with mock.patch("psutil._psplatform.Process.cpu_times",
                        side_effect=psutil.AccessDenied(0, "")) as m:
            flag = object()
            for p in psutil.process_iter(attrs=["pid", "cpu_times"],
                                         ad_value=flag):
                self.assertIs(p.info['cpu_times'], flag)
                self.assertGreaterEqual(p.info['pid'], 0)
            assert m.called

    def test_wait_procs(self):
        def callback(p):
            pids.append(p.pid)

        pids = []
        sproc1 = get_test_subprocess()
        sproc2 = get_test_subprocess()
        sproc3 = get_test_subprocess()
        procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)]
        self.assertRaises(ValueError, psutil.wait_procs, procs, timeout=-1)
        self.assertRaises(TypeError, psutil.wait_procs, procs, callback=1)
        t = time.time()
        gone, alive = psutil.wait_procs(procs, timeout=0.01, callback=callback)

        self.assertLess(time.time() - t, 0.5)
        self.assertEqual(gone, [])
        self.assertEqual(len(alive), 3)
        self.assertEqual(pids, [])
        for p in alive:
            self.assertFalse(hasattr(p, 'returncode'))

        @retry_on_failure(30)
        def test(procs, callback):
            gone, alive = psutil.wait_procs(procs,
                                            timeout=0.03,
                                            callback=callback)
            self.assertEqual(len(gone), 1)
            self.assertEqual(len(alive), 2)
            return gone, alive

        sproc3.terminate()
        gone, alive = test(procs, callback)
        self.assertIn(sproc3.pid, [x.pid for x in gone])
        if POSIX:
            self.assertEqual(gone.pop().returncode, -signal.SIGTERM)
        else:
            self.assertEqual(gone.pop().returncode, 1)
        self.assertEqual(pids, [sproc3.pid])
        for p in alive:
            self.assertFalse(hasattr(p, 'returncode'))

        @retry_on_failure(30)
        def test(procs, callback):
            gone, alive = psutil.wait_procs(procs,
                                            timeout=0.03,
                                            callback=callback)
            self.assertEqual(len(gone), 3)
            self.assertEqual(len(alive), 0)
            return gone, alive

        sproc1.terminate()
        sproc2.terminate()
        gone, alive = test(procs, callback)
        self.assertEqual(set(pids), set([sproc1.pid, sproc2.pid, sproc3.pid]))
        for p in gone:
            self.assertTrue(hasattr(p, 'returncode'))

    def test_wait_procs_no_timeout(self):
        sproc1 = get_test_subprocess()
        sproc2 = get_test_subprocess()
        sproc3 = get_test_subprocess()
        procs = [psutil.Process(x.pid) for x in (sproc1, sproc2, sproc3)]
        for p in procs:
            p.terminate()
        gone, alive = psutil.wait_procs(procs)

    def test_boot_time(self):
        bt = psutil.boot_time()
        self.assertIsInstance(bt, float)
        self.assertGreater(bt, 0)
        self.assertLess(bt, time.time())

    @unittest.skipIf(not POSIX, 'POSIX only')
    def test_PAGESIZE(self):
        # pagesize is used internally to perform different calculations
        # and it's determined by using SC_PAGE_SIZE; make sure
        # getpagesize() returns the same value.
        import resource
        self.assertEqual(os.sysconf("SC_PAGE_SIZE"), resource.getpagesize())

    def test_virtual_memory(self):
        mem = psutil.virtual_memory()
        assert mem.total > 0, mem
        assert mem.available > 0, mem
        assert 0 <= mem.percent <= 100, mem
        assert mem.used > 0, mem
        assert mem.free >= 0, mem
        for name in mem._fields:
            value = getattr(mem, name)
            if name != 'percent':
                self.assertIsInstance(value, (int, long))
            if name != 'total':
                if not value >= 0:
                    self.fail("%r < 0 (%s)" % (name, value))
                if value > mem.total:
                    self.fail("%r > total (total=%s, %s=%s)" %
                              (name, mem.total, name, value))

    def test_swap_memory(self):
        mem = psutil.swap_memory()
        self.assertEqual(mem._fields,
                         ('total', 'used', 'free', 'percent', 'sin', 'sout'))

        assert mem.total >= 0, mem
        assert mem.used >= 0, mem
        if mem.total > 0:
            assert mem.free > 0, mem
        else:
            # likely a system with no swap partition
            assert mem.free == 0, mem
        assert 0 <= mem.percent <= 100, mem
        assert mem.sin >= 0, mem
        assert mem.sout >= 0, mem

    def test_pid_exists(self):
        sproc = get_test_subprocess()
        self.assertTrue(psutil.pid_exists(sproc.pid))
        p = psutil.Process(sproc.pid)
        p.kill()
        p.wait()
        self.assertFalse(psutil.pid_exists(sproc.pid))
        self.assertFalse(psutil.pid_exists(-1))
        self.assertEqual(psutil.pid_exists(0), 0 in psutil.pids())

    def test_pid_exists_2(self):
        reap_children()
        pids = psutil.pids()
        for pid in pids:
            try:
                assert psutil.pid_exists(pid)
            except AssertionError:
                # in case the process disappeared in meantime fail only
                # if it is no longer in psutil.pids()
                time.sleep(.1)
                if pid in psutil.pids():
                    self.fail(pid)
        pids = range(max(pids) + 5000, max(pids) + 6000)
        for pid in pids:
            self.assertFalse(psutil.pid_exists(pid), msg=pid)

    def test_pids(self):
        pidslist = psutil.pids()
        procslist = [x.pid for x in psutil.process_iter()]
        # make sure every pid is unique
        self.assertEqual(sorted(set(pidslist)), pidslist)
        self.assertEqual(pidslist, procslist)

    def test_test(self):
        # test for psutil.test() function
        stdout = sys.stdout
        sys.stdout = DEVNULL
        try:
            psutil.test()
        finally:
            sys.stdout = stdout

    def test_cpu_count(self):
        logical = psutil.cpu_count()
        self.assertEqual(logical, len(psutil.cpu_times(percpu=True)))
        self.assertGreaterEqual(logical, 1)
        #
        if os.path.exists("/proc/cpuinfo"):
            with open("/proc/cpuinfo") as fd:
                cpuinfo_data = fd.read()
            if "physical id" not in cpuinfo_data:
                raise unittest.SkipTest("cpuinfo doesn't include physical id")
        physical = psutil.cpu_count(logical=False)
        if WINDOWS and sys.getwindowsversion()[:2] <= (6, 1):  # <= Windows 7
            self.assertIsNone(physical)
        else:
            self.assertGreaterEqual(physical, 1)
            self.assertGreaterEqual(logical, physical)

    def test_cpu_count_none(self):
        # https://github.com/giampaolo/psutil/issues/1085
        for val in (-1, 0, None):
            with mock.patch('psutil._psplatform.cpu_count_logical',
                            return_value=val) as m:
                self.assertIsNone(psutil.cpu_count())
                assert m.called
            with mock.patch('psutil._psplatform.cpu_count_physical',
                            return_value=val) as m:
                self.assertIsNone(psutil.cpu_count(logical=False))
                assert m.called

    def test_cpu_times(self):
        # Check type, value >= 0, str().
        total = 0
        times = psutil.cpu_times()
        sum(times)
        for cp_time in times:
            self.assertIsInstance(cp_time, float)
            self.assertGreaterEqual(cp_time, 0.0)
            total += cp_time
        self.assertEqual(total, sum(times))
        str(times)
        # CPU times are always supposed to increase over time
        # or at least remain the same and that's because time
        # cannot go backwards.
        # Surprisingly sometimes this might not be the case (at
        # least on Windows and Linux), see:
        # https://github.com/giampaolo/psutil/issues/392
        # https://github.com/giampaolo/psutil/issues/645
        # if not WINDOWS:
        #     last = psutil.cpu_times()
        #     for x in range(100):
        #         new = psutil.cpu_times()
        #         for field in new._fields:
        #             new_t = getattr(new, field)
        #             last_t = getattr(last, field)
        #             self.assertGreaterEqual(new_t, last_t,
        #                                     msg="%s %s" % (new_t, last_t))
        #         last = new

    def test_cpu_times_time_increases(self):
        # Make sure time increases between calls.
        t1 = sum(psutil.cpu_times())
        stop_at = time.time() + 1
        while time.time() < stop_at:
            t2 = sum(psutil.cpu_times())
            if t2 > t1:
                return
        self.fail("time remained the same")

    def test_per_cpu_times(self):
        # Check type, value >= 0, str().
        for times in psutil.cpu_times(percpu=True):
            total = 0
            sum(times)
            for cp_time in times:
                self.assertIsInstance(cp_time, float)
                self.assertGreaterEqual(cp_time, 0.0)
                total += cp_time
            self.assertEqual(total, sum(times))
            str(times)
        self.assertEqual(len(psutil.cpu_times(percpu=True)[0]),
                         len(psutil.cpu_times(percpu=False)))

        # Note: in theory CPU times are always supposed to increase over
        # time or remain the same but never go backwards. In practice
        # sometimes this is not the case.
        # This issue seemed to afflict Windows:
        # https://github.com/giampaolo/psutil/issues/392
        # ...but it turns out also Linux (rarely) behaves the same.
        # last = psutil.cpu_times(percpu=True)
        # for x in range(100):
        #     new = psutil.cpu_times(percpu=True)
        #     for index in range(len(new)):
        #         newcpu = new[index]
        #         lastcpu = last[index]
        #         for field in newcpu._fields:
        #             new_t = getattr(newcpu, field)
        #             last_t = getattr(lastcpu, field)
        #             self.assertGreaterEqual(
        #                 new_t, last_t, msg="%s %s" % (lastcpu, newcpu))
        #     last = new

    def test_per_cpu_times_2(self):
        # Simulate some work load then make sure time have increased
        # between calls.
        tot1 = psutil.cpu_times(percpu=True)
        stop_at = time.time() + 0.1
        while True:
            if time.time() >= stop_at:
                break
        tot2 = psutil.cpu_times(percpu=True)
        for t1, t2 in zip(tot1, tot2):
            t1, t2 = sum(t1), sum(t2)
            difference = t2 - t1
            if difference >= 0.05:
                return
        self.fail()

    def test_cpu_times_comparison(self):
        # Make sure the sum of all per cpu times is almost equal to
        # base "one cpu" times.
        base = psutil.cpu_times()
        per_cpu = psutil.cpu_times(percpu=True)
        summed_values = base._make([sum(num) for num in zip(*per_cpu)])
        for field in base._fields:
            self.assertAlmostEqual(getattr(base, field),
                                   getattr(summed_values, field),
                                   delta=1)

    def _test_cpu_percent(self, percent, last_ret, new_ret):
        try:
            self.assertIsInstance(percent, float)
            self.assertGreaterEqual(percent, 0.0)
            self.assertIsNot(percent, -0.0)
            self.assertLessEqual(percent, 100.0 * psutil.cpu_count())
        except AssertionError as err:
            raise AssertionError(
                "\n%s\nlast=%s\nnew=%s" %
                (err, pprint.pformat(last_ret), pprint.pformat(new_ret)))

    def test_cpu_percent(self):
        last = psutil.cpu_percent(interval=0.001)
        for x in range(100):
            new = psutil.cpu_percent(interval=None)
            self._test_cpu_percent(new, last, new)
            last = new
        with self.assertRaises(ValueError):
            psutil.cpu_percent(interval=-1)

    def test_per_cpu_percent(self):
        last = psutil.cpu_percent(interval=0.001, percpu=True)
        self.assertEqual(len(last), psutil.cpu_count())
        for x in range(100):
            new = psutil.cpu_percent(interval=None, percpu=True)
            for percent in new:
                self._test_cpu_percent(percent, last, new)
            last = new
        with self.assertRaises(ValueError):
            psutil.cpu_percent(interval=-1, percpu=True)

    def test_cpu_times_percent(self):
        last = psutil.cpu_times_percent(interval=0.001)
        for x in range(100):
            new = psutil.cpu_times_percent(interval=None)
            for percent in new:
                self._test_cpu_percent(percent, last, new)
            self._test_cpu_percent(sum(new), last, new)
            last = new

    def test_per_cpu_times_percent(self):
        last = psutil.cpu_times_percent(interval=0.001, percpu=True)
        self.assertEqual(len(last), psutil.cpu_count())
        for x in range(100):
            new = psutil.cpu_times_percent(interval=None, percpu=True)
            for cpu in new:
                for percent in cpu:
                    self._test_cpu_percent(percent, last, new)
                self._test_cpu_percent(sum(cpu), last, new)
            last = new

    def test_per_cpu_times_percent_negative(self):
        # see: https://github.com/giampaolo/psutil/issues/645
        psutil.cpu_times_percent(percpu=True)
        zero_times = [
            x._make([0 for x in range(len(x._fields))])
            for x in psutil.cpu_times(percpu=True)
        ]
        with mock.patch('psutil.cpu_times', return_value=zero_times):
            for cpu in psutil.cpu_times_percent(percpu=True):
                for percent in cpu:
                    self._test_cpu_percent(percent, None, None)

    def test_disk_usage(self):
        usage = psutil.disk_usage(os.getcwd())
        self.assertEqual(usage._fields, ('total', 'used', 'free', 'percent'))

        assert usage.total > 0, usage
        assert usage.used > 0, usage
        assert usage.free > 0, usage
        assert usage.total > usage.used, usage
        assert usage.total > usage.free, usage
        assert 0 <= usage.percent <= 100, usage.percent
        if hasattr(shutil, 'disk_usage'):
            # py >= 3.3, see: http://bugs.python.org/issue12442
            shutil_usage = shutil.disk_usage(os.getcwd())
            tolerance = 5 * 1024 * 1024  # 5MB
            self.assertEqual(usage.total, shutil_usage.total)
            self.assertAlmostEqual(usage.free,
                                   shutil_usage.free,
                                   delta=tolerance)
            self.assertAlmostEqual(usage.used,
                                   shutil_usage.used,
                                   delta=tolerance)

        # if path does not exist OSError ENOENT is expected across
        # all platforms
        fname = tempfile.mktemp()
        with self.assertRaises(OSError) as exc:
            psutil.disk_usage(fname)
        self.assertEqual(exc.exception.errno, errno.ENOENT)

    def test_disk_usage_unicode(self):
        # See: https://github.com/giampaolo/psutil/issues/416
        if ASCII_FS:
            with self.assertRaises(UnicodeEncodeError):
                psutil.disk_usage(TESTFN_UNICODE)

    def test_disk_usage_bytes(self):
        psutil.disk_usage(b'.')

    def test_disk_partitions(self):
        # all = False
        ls = psutil.disk_partitions(all=False)
        # on travis we get:
        #     self.assertEqual(p.cpu_affinity(), [n])
        # AssertionError: Lists differ: [0, 1, 2, 3, 4, 5, 6, 7,... != [0]
        self.assertTrue(ls, msg=ls)
        for disk in ls:
            self.assertIsInstance(disk.device, str)
            self.assertIsInstance(disk.mountpoint, str)
            self.assertIsInstance(disk.fstype, str)
            self.assertIsInstance(disk.opts, str)
            if WINDOWS and 'cdrom' in disk.opts:
                continue
            if not POSIX:
                assert os.path.exists(disk.device), disk
            else:
                # we cannot make any assumption about this, see:
                # http://goo.gl/p9c43
                disk.device
            if SUNOS or TRAVIS:
                # on solaris apparently mount points can also be files
                assert os.path.exists(disk.mountpoint), disk
            else:
                assert os.path.isdir(disk.mountpoint), disk
            assert disk.fstype, disk

        # all = True
        ls = psutil.disk_partitions(all=True)
        self.assertTrue(ls, msg=ls)
        for disk in psutil.disk_partitions(all=True):
            if not WINDOWS:
                try:
                    os.stat(disk.mountpoint)
                except OSError as err:
                    if TRAVIS and MACOS and err.errno == errno.EIO:
                        continue
                    # http://mail.python.org/pipermail/python-dev/
                    #     2012-June/120787.html
                    if err.errno not in (errno.EPERM, errno.EACCES):
                        raise
                else:
                    assert os.path.exists(disk.mountpoint), disk
            self.assertIsInstance(disk.fstype, str)
            self.assertIsInstance(disk.opts, str)

        def find_mount_point(path):
            path = os.path.abspath(path)
            while not os.path.ismount(path):
                path = os.path.dirname(path)
            return path.lower()

        mount = find_mount_point(__file__)
        mounts = [
            x.mountpoint.lower() for x in psutil.disk_partitions(all=True)
        ]
        self.assertIn(mount, mounts)
        psutil.disk_usage(mount)

    @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
    def test_net_io_counters(self):
        def check_ntuple(nt):
            self.assertEqual(nt[0], nt.bytes_sent)
            self.assertEqual(nt[1], nt.bytes_recv)
            self.assertEqual(nt[2], nt.packets_sent)
            self.assertEqual(nt[3], nt.packets_recv)
            self.assertEqual(nt[4], nt.errin)
            self.assertEqual(nt[5], nt.errout)
            self.assertEqual(nt[6], nt.dropin)
            self.assertEqual(nt[7], nt.dropout)
            assert nt.bytes_sent >= 0, nt
            assert nt.bytes_recv >= 0, nt
            assert nt.packets_sent >= 0, nt
            assert nt.packets_recv >= 0, nt
            assert nt.errin >= 0, nt
            assert nt.errout >= 0, nt
            assert nt.dropin >= 0, nt
            assert nt.dropout >= 0, nt

        ret = psutil.net_io_counters(pernic=False)
        check_ntuple(ret)
        ret = psutil.net_io_counters(pernic=True)
        self.assertNotEqual(ret, [])
        for key in ret:
            self.assertTrue(key)
            self.assertIsInstance(key, str)
            check_ntuple(ret[key])

    @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
    def test_net_io_counters_no_nics(self):
        # Emulate a case where no NICs are installed, see:
        # https://github.com/giampaolo/psutil/issues/1062
        with mock.patch('psutil._psplatform.net_io_counters',
                        return_value={}) as m:
            self.assertIsNone(psutil.net_io_counters(pernic=False))
            self.assertEqual(psutil.net_io_counters(pernic=True), {})
            assert m.called

    def test_net_if_addrs(self):
        nics = psutil.net_if_addrs()
        assert nics, nics

        nic_stats = psutil.net_if_stats()

        # Not reliable on all platforms (net_if_addrs() reports more
        # interfaces).
        # self.assertEqual(sorted(nics.keys()),
        #                  sorted(psutil.net_io_counters(pernic=True).keys()))

        families = set([socket.AF_INET, socket.AF_INET6, psutil.AF_LINK])
        for nic, addrs in nics.items():
            self.assertIsInstance(nic, str)
            self.assertEqual(len(set(addrs)), len(addrs))
            for addr in addrs:
                self.assertIsInstance(addr.family, int)
                self.assertIsInstance(addr.address, str)
                self.assertIsInstance(addr.netmask, (str, type(None)))
                self.assertIsInstance(addr.broadcast, (str, type(None)))
                self.assertIn(addr.family, families)
                if sys.version_info >= (3, 4):
                    self.assertIsInstance(addr.family, enum.IntEnum)
                if nic_stats[nic].isup:
                    # Do not test binding to addresses of interfaces
                    # that are down
                    if addr.family == socket.AF_INET:
                        s = socket.socket(addr.family)
                        with contextlib.closing(s):
                            s.bind((addr.address, 0))
                    elif addr.family == socket.AF_INET6:
                        info = socket.getaddrinfo(addr.address, 0,
                                                  socket.AF_INET6,
                                                  socket.SOCK_STREAM, 0,
                                                  socket.AI_PASSIVE)[0]
                        af, socktype, proto, canonname, sa = info
                        s = socket.socket(af, socktype, proto)
                        with contextlib.closing(s):
                            s.bind(sa)
                for ip in (addr.address, addr.netmask, addr.broadcast,
                           addr.ptp):
                    if ip is not None:
                        # TODO: skip AF_INET6 for now because I get:
                        # AddressValueError: Only hex digits permitted in
                        # u'c6f3%lxcbr0' in u'fe80::c8e0:fff:fe54:c6f3%lxcbr0'
                        if addr.family != socket.AF_INET6:
                            check_net_address(ip, addr.family)
                # broadcast and ptp addresses are mutually exclusive
                if addr.broadcast:
                    self.assertIsNone(addr.ptp)
                elif addr.ptp:
                    self.assertIsNone(addr.broadcast)

        if BSD or MACOS or SUNOS:
            if hasattr(socket, "AF_LINK"):
                self.assertEqual(psutil.AF_LINK, socket.AF_LINK)
        elif LINUX:
            self.assertEqual(psutil.AF_LINK, socket.AF_PACKET)
        elif WINDOWS:
            self.assertEqual(psutil.AF_LINK, -1)

    def test_net_if_addrs_mac_null_bytes(self):
        # Simulate that the underlying C function returns an incomplete
        # MAC address. psutil is supposed to fill it with null bytes.
        # https://github.com/giampaolo/psutil/issues/786
        if POSIX:
            ret = [('em1', psutil.AF_LINK, '06:3d:29', None, None, None)]
        else:
            ret = [('em1', -1, '06-3d-29', None, None, None)]
        with mock.patch('psutil._psplatform.net_if_addrs',
                        return_value=ret) as m:
            addr = psutil.net_if_addrs()['em1'][0]
            assert m.called
            if POSIX:
                self.assertEqual(addr.address, '06:3d:29:00:00:00')
            else:
                self.assertEqual(addr.address, '06-3d-29-00-00-00')

    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")  # raises EPERM
    def test_net_if_stats(self):
        nics = psutil.net_if_stats()
        assert nics, nics
        all_duplexes = (psutil.NIC_DUPLEX_FULL, psutil.NIC_DUPLEX_HALF,
                        psutil.NIC_DUPLEX_UNKNOWN)
        for name, stats in nics.items():
            self.assertIsInstance(name, str)
            isup, duplex, speed, mtu = stats
            self.assertIsInstance(isup, bool)
            self.assertIn(duplex, all_duplexes)
            self.assertIn(duplex, all_duplexes)
            self.assertGreaterEqual(speed, 0)
            self.assertGreaterEqual(mtu, 0)

    @unittest.skipIf(not (LINUX or BSD or MACOS),
                     "LINUX or BSD or MACOS specific")
    def test_net_if_stats_enodev(self):
        # See: https://github.com/giampaolo/psutil/issues/1279
        with mock.patch('psutil._psutil_posix.net_if_mtu',
                        side_effect=OSError(errno.ENODEV, "")) as m:
            ret = psutil.net_if_stats()
            self.assertEqual(ret, {})
            assert m.called

    @unittest.skipIf(LINUX and not os.path.exists('/proc/diskstats'),
                     '/proc/diskstats not available on this linux version')
    @unittest.skipIf(APPVEYOR and psutil.disk_io_counters() is None,
                     "unreliable on APPVEYOR")  # no visible disks
    def test_disk_io_counters(self):
        def check_ntuple(nt):
            self.assertEqual(nt[0], nt.read_count)
            self.assertEqual(nt[1], nt.write_count)
            self.assertEqual(nt[2], nt.read_bytes)
            self.assertEqual(nt[3], nt.write_bytes)
            if not (OPENBSD or NETBSD):
                self.assertEqual(nt[4], nt.read_time)
                self.assertEqual(nt[5], nt.write_time)
                if LINUX:
                    self.assertEqual(nt[6], nt.read_merged_count)
                    self.assertEqual(nt[7], nt.write_merged_count)
                    self.assertEqual(nt[8], nt.busy_time)
                elif FREEBSD:
                    self.assertEqual(nt[6], nt.busy_time)
            for name in nt._fields:
                assert getattr(nt, name) >= 0, nt

        ret = psutil.disk_io_counters(perdisk=False)
        assert ret is not None, "no disks on this system?"
        check_ntuple(ret)
        ret = psutil.disk_io_counters(perdisk=True)
        # make sure there are no duplicates
        self.assertEqual(len(ret), len(set(ret)))
        for key in ret:
            assert key, key
            check_ntuple(ret[key])

    def test_disk_io_counters_no_disks(self):
        # Emulate a case where no disks are installed, see:
        # https://github.com/giampaolo/psutil/issues/1062
        with mock.patch('psutil._psplatform.disk_io_counters',
                        return_value={}) as m:
            self.assertIsNone(psutil.disk_io_counters(perdisk=False))
            self.assertEqual(psutil.disk_io_counters(perdisk=True), {})
            assert m.called

    # can't find users on APPVEYOR or TRAVIS
    @unittest.skipIf(APPVEYOR or TRAVIS and not psutil.users(),
                     "unreliable on APPVEYOR or TRAVIS")
    def test_users(self):
        users = psutil.users()
        self.assertNotEqual(users, [])
        for user in users:
            assert user.name, user
            self.assertIsInstance(user.name, str)
            self.assertIsInstance(user.terminal, (str, type(None)))
            if user.host is not None:
                self.assertIsInstance(user.host, (str, type(None)))
            user.terminal
            user.host
            assert user.started > 0.0, user
            datetime.datetime.fromtimestamp(user.started)
            if WINDOWS or OPENBSD:
                self.assertIsNone(user.pid)
            else:
                psutil.Process(user.pid)

    def test_cpu_stats(self):
        # Tested more extensively in per-platform test modules.
        infos = psutil.cpu_stats()
        self.assertEqual(
            infos._fields,
            ('ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls'))
        for name in infos._fields:
            value = getattr(infos, name)
            self.assertGreaterEqual(value, 0)
            # on AIX, ctx_switches is always 0
            if not AIX and name in ('ctx_switches', 'interrupts'):
                self.assertGreater(value, 0)

    @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
    def test_cpu_freq(self):
        def check_ls(ls):
            for nt in ls:
                self.assertEqual(nt._fields, ('current', 'min', 'max'))
                self.assertLessEqual(nt.current, nt.max)
                for name in nt._fields:
                    value = getattr(nt, name)
                    self.assertIsInstance(value, (int, long, float))
                    self.assertGreaterEqual(value, 0)

        ls = psutil.cpu_freq(percpu=True)
        if TRAVIS and not ls:
            raise self.skipTest("skipped on Travis")
        if FREEBSD and not ls:
            raise self.skipTest("returns empty list on FreeBSD")

        assert ls, ls
        check_ls([psutil.cpu_freq(percpu=False)])

        if LINUX:
            self.assertEqual(len(ls), psutil.cpu_count())

    @unittest.skipIf(not HAS_GETLOADAVG, "not supported")
    def test_getloadavg(self):
        loadavg = psutil.getloadavg()
        assert len(loadavg) == 3

        for load in loadavg:
            self.assertIsInstance(load, float)
            self.assertGreaterEqual(load, 0.0)

    def test_os_constants(self):
        names = [
            "POSIX", "WINDOWS", "LINUX", "MACOS", "FREEBSD", "OPENBSD",
            "NETBSD", "BSD", "SUNOS"
        ]
        for name in names:
            self.assertIsInstance(getattr(psutil, name), bool, msg=name)

        if os.name == 'posix':
            assert psutil.POSIX
            assert not psutil.WINDOWS
            names.remove("POSIX")
            if "linux" in sys.platform.lower():
                assert psutil.LINUX
                names.remove("LINUX")
            elif "bsd" in sys.platform.lower():
                assert psutil.BSD
                self.assertEqual(
                    [psutil.FREEBSD, psutil.OPENBSD,
                     psutil.NETBSD].count(True), 1)
                names.remove("BSD")
                names.remove("FREEBSD")
                names.remove("OPENBSD")
                names.remove("NETBSD")
            elif "sunos" in sys.platform.lower() or \
                    "solaris" in sys.platform.lower():
                assert psutil.SUNOS
                names.remove("SUNOS")
            elif "darwin" in sys.platform.lower():
                assert psutil.MACOS
                names.remove("MACOS")
        else:
            assert psutil.WINDOWS
            assert not psutil.POSIX
            names.remove("WINDOWS")

        # assert all other constants are set to False
        for name in names:
            self.assertIs(getattr(psutil, name), False, msg=name)

    @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
    def test_sensors_temperatures(self):
        temps = psutil.sensors_temperatures()
        for name, entries in temps.items():
            self.assertIsInstance(name, str)
            for entry in entries:
                self.assertIsInstance(entry.label, str)
                if entry.current is not None:
                    self.assertGreaterEqual(entry.current, 0)
                if entry.high is not None:
                    self.assertGreaterEqual(entry.high, 0)
                if entry.critical is not None:
                    self.assertGreaterEqual(entry.critical, 0)

    @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
    def test_sensors_temperatures_fahrenheit(self):
        d = {'coretemp': [('label', 50.0, 60.0, 70.0)]}
        with mock.patch("psutil._psplatform.sensors_temperatures",
                        return_value=d) as m:
            temps = psutil.sensors_temperatures(fahrenheit=True)['coretemp'][0]
            assert m.called
            self.assertEqual(temps.current, 122.0)
            self.assertEqual(temps.high, 140.0)
            self.assertEqual(temps.critical, 158.0)

    @unittest.skipIf(not HAS_SENSORS_BATTERY, "not supported")
    @unittest.skipIf(not HAS_BATTERY, "no battery")
    def test_sensors_battery(self):
        ret = psutil.sensors_battery()
        self.assertGreaterEqual(ret.percent, 0)
        self.assertLessEqual(ret.percent, 100)
        if ret.secsleft not in (psutil.POWER_TIME_UNKNOWN,
                                psutil.POWER_TIME_UNLIMITED):
            self.assertGreaterEqual(ret.secsleft, 0)
        else:
            if ret.secsleft == psutil.POWER_TIME_UNLIMITED:
                self.assertTrue(ret.power_plugged)
        self.assertIsInstance(ret.power_plugged, bool)

    @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
    def test_sensors_fans(self):
        fans = psutil.sensors_fans()
        for name, entries in fans.items():
            self.assertIsInstance(name, str)
            for entry in entries:
                self.assertIsInstance(entry.label, str)
                self.assertIsInstance(entry.current, (int, long))
                self.assertGreaterEqual(entry.current, 0)
Example #45
0
import psutil

# CPU info
print(psutil.cpu_count())  # number of logical CPUs
print(psutil.cpu_count(logical=False))  # number of physical CPU cores
print(psutil.cpu_times())  # user/system/idle CPU times

# Memory info
print(psutil.virtual_memory())
print(psutil.swap_memory())

# Disk info
print(psutil.disk_partitions())  # disk partitions
print(psutil.disk_usage('D:'))  # disk usage
print(psutil.disk_io_counters())  # disk I/O counters

# Network info
print(psutil.net_io_counters())  # bytes/packets sent and received
print(psutil.net_if_addrs())  # network interface addresses
print(psutil.net_if_stats())  # network interface status
print(psutil.net_connections())  # current network connections

# Process info
print(psutil.pids())  # all process IDs
p = psutil.Process(12196)  # get the process with PID 12196
print(p.name())  # process name
print(p.exe())  # path to the process executable
print(p.cwd())  # process working directory
print(p.cmdline())  # command line used to start the process
print(p.ppid())  # parent process ID
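
# When reading several attributes of one process, as above, Process.oneshot()
# lets psutil cache the underlying system data for the duration of the block;
# a small sketch reusing the example's placeholder PID:
with p.oneshot():
    print(p.name(), p.ppid(), p.cwd())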
Example #46
0
    def MakeObservation(self) -> None:
        """Record a new observation and update internal state."""
        # CPU.
        cpu_loads = np.array(psutil.cpu_percent(percpu=True)) / 100
        self.Update("cpu_load", np.average(cpu_loads))
        self.Update("cpu_load_max", np.max(cpu_loads))
        self.Update("cpu_freq_mhz", psutil.cpu_freq().current)

        # Memory.
        self.Update("memory_util", psutil.virtual_memory().percent / 100)
        self.Update("swap_util", psutil.swap_memory().percent / 100)

        # Counter-based stats.
        elapsed = time() - self.last_record_time
        disk_counters = psutil.disk_io_counters(perdisk=False)
        net_counters = psutil.net_io_counters(pernic=False)

        # Disk counters.
        self.Update(
            "disk_reads_per_sec",
            (disk_counters.read_count - self.prev_disk_counters.read_count) /
            elapsed,
        )
        self.Update(
            "disk_writes_per_sec",
            (disk_counters.write_count - self.prev_disk_counters.write_count) /
            elapsed,
        )

        self.Update(
            "disk_read_mb_per_sec",
            ((disk_counters.read_bytes - self.prev_disk_counters.read_bytes) /
             (1024 * 1024)) / elapsed,
        )
        self.Update(
            "disk_write_mb_per_sec",
            ((disk_counters.write_bytes - self.prev_disk_counters.write_bytes)
             / (1024 * 1024)) / elapsed,
        )

        # Network counters.
        self.Update(
            "net_packets_recv_per_sec",
            (net_counters.packets_recv - self.prev_net_counters.packets_recv) /
            elapsed,
        )
        self.Update(
            "net_packets_sent_per_sec",
            (net_counters.packets_sent - self.prev_net_counters.packets_sent) /
            elapsed,
        )

        self.Update(
            "net_data_recv_mb_per_sec",
            ((net_counters.bytes_recv - self.prev_net_counters.bytes_recv) /
             (1024 * 1024)) / elapsed,
        )
        self.Update(
            "net_data_sent_mb_per_sec",
            ((net_counters.bytes_sent - self.prev_net_counters.bytes_sent) /
             (1024 * 1024)) / elapsed,
        )

        # Update counters.
        self.last_record_time = time()
        self.prev_disk_counters = disk_counters
        self.prev_net_counters = net_counters

        # GPU stats.
        for gpu_data, gpu in zip(self.stats.get("gpus", []), GPUtil.getGPUs()):
            self.Update("load", gpu.load, data=gpu_data)
            self.Update("memory_util", gpu.memoryUtil, data=gpu_data)
            self.Update("temperature", gpu.temperature, data=gpu_data)

        self.stats["observation_count"] += 1

        # Call the user-provided callback, or list of callbacks.
        if callable(self.on_observation):
            self.on_observation(self)
        else:
            for callback in self.on_observation:
                callback(self)
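
    # Hedged sketch (not part of the original class): MakeObservation assumes
    # last_record_time, prev_disk_counters and prev_net_counters were seeded
    # beforehand, e.g. by a helper along these lines:
    def ResetCounters(self) -> None:
        """Seed the baselines that MakeObservation diffs against (assumed helper)."""
        self.last_record_time = time()
        self.prev_disk_counters = psutil.disk_io_counters(perdisk=False)
        self.prev_net_counters = psutil.net_io_counters(pernic=False)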
Example #47
0
# -*- coding: UTF-8 -*-
#pip install psutil

import psutil

print 'file path ', psutil.__file__
mem = psutil.virtual_memory()
swap_mem = psutil.swap_memory()
cpu_times = psutil.cpu_times()
cpu_count = psutil.cpu_count()
cpu_total_time = cpu_times.user + cpu_times.system + cpu_times.idle
disk_info = psutil.disk_partitions()
disk_usage = psutil.disk_usage('/').percent
disk_io_count = psutil.disk_io_counters(perdisk=True)

print disk_io_count
print disk_usage
print disk_info
print 'swap_memory', swap_mem
print('memory_used[%.2f%%]' % (float(mem.used) / float(mem.total) * 100))

print 'cpu_count', cpu_count
print 'cpu_count,physical', psutil.cpu_count(logical=False)
print 'cpu_time_user[%.2f%%]' % (cpu_times.user / float(cpu_total_time) * 100)

print psutil.net_io_counters()
print psutil.net_io_counters(pernic=True)['en0']
print psutil.users()[0]
import datetime

print 'bootTime ', datetime.datetime.fromtimestamp(psutil.boot_time())
Example #48
0
def get_disk_io_counters():
    """Get system-wide disk I/O counters.
    :rtype: namedtuple with disk I/O counters
    """
    return psutil.disk_io_counters()
Example #49
0
# see https://github.com/giampaolo/psutil

import psutil

### cpu
print('cpu_count', psutil.cpu_count(logical=False))
print(psutil.cpu_times())

### memory
print(psutil.virtual_memory())
print(psutil.swap_memory())

### disk
print(psutil.disk_partitions())
print(psutil.disk_usage('/'))
print(psutil.disk_io_counters(perdisk=False))

### network
print(psutil.net_io_counters(pernic=True))
print(psutil.net_if_stats())

### process
# pid = 1234
# p = psutil.Process(pid)
# print(p.name())
# print(p.ppid())
# print(p.cpu_times())
# print(p.memory_info())
# print(p.io_counters())
# print(p.open_files())
# print(p.connections())
Example #50
0
def run_adapter():

    model = ModelFactory.create(model_params.MODEL_PARAMS)
    model_mem = ModelFactory.create(meme_params.MODEL_PARAMS)
    model_disk = ModelFactory.create(disk_params.MODEL_PARAMS)
    model_disk_read = ModelFactory.create(disk_read_params.MODEL_PARAMS)
    model_net = ModelFactory.create(net_params.MODEL_PARAMS)
    model_mem.enableInference({"predictedField": "mem_active"})
    model_disk.enableInference({"predictedField": "diskwritebytes"})
    model_disk_read.enableInference({"predictedField": "diskreadbytes"})
    model_net.enableInference({"predictedField": "bytes_sent"})
    model.enableInference({"predictedField": "cpu"})
    adapter = 0
    attempt = 0
    filename = 'adapt.csv'
    fileHandle = open(filename, "w")
    writer = csv.writer(fileHandle)
    writer.writerow(["timestamp","cpu", "cpu prediction", "cpu_anomalyscore", "cpu_anamolyLikelihood", "Uc",\
      "mem_used", "mem_prediction", "mem_anomalyScore", "mem_anomalyLikelihood","Um",\
      "disk_write_bytes", "disk_prediction", "disk_anomalyScore","disk_anomalyLikelihood", "Udw",\
      "disk_read_bytes", "disk_read_prediction", "disk_read_anomalyScore","disk_read_anomalyLikelihood", "Udr",\
      "net_s", "net_s_prediction", "net_s_anomalyScore","net_s_anomalyLikelihood", "udnet",\
      "totalUM"])

    for row in range(1, 200):

        s = time.strftime(DATE_FORMAT)
        timestamp = datetime.datetime.strptime(s, DATE_FORMAT)
        net_s = psutil.net_io_counters(pernic=True)['eth0'].bytes_sent
        net_s = float(net_s) / 1073741824
        result_net_s = model_net.run({"bytes_sent": net_s})
        net_s_prediction = result_net_s.inferences["multiStepBestPredictions"][
            1]
        net_s_anomalyScore = result_net_s.inferences['anomalyScore']
        net_s_anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
            net_s, net_s_anomalyScore, timestamp)
        print 'net_anomalyScore: ', net_s_anomalyScore, net_s
        udnet = (net_s_anomalyLikelihood * net_s +
                 net_s_anomalyLikelihood * net_s_prediction) / (
                     net_s_anomalyLikelihood + net_s_anomalyLikelihood)

        print 'udnet', net_s_anomalyLikelihood, udnet

        disk = psutil.disk_io_counters(perdisk=False).read_bytes
        disk_read_bytes = float(disk) / 1073741824
        result_disk_read = model_disk_read.run(
            {"diskreadbytes": disk_read_bytes})
        disk_read_prediction = result_disk_read.inferences[
            "multiStepBestPredictions"][1]
        disk_read_anomalyScore = result_disk_read.inferences['anomalyScore']
        disk_read_anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
            disk_read_bytes, disk_read_anomalyScore, timestamp)
        print 'disk_read_anomalyScore: ', disk_read_anomalyLikelihood, disk_read_bytes
        udr = (disk_read_anomalyLikelihood * disk_read_bytes +
               disk_read_anomalyLikelihood * disk_read_prediction) / (
                   disk_read_anomalyLikelihood + disk_read_anomalyLikelihood)

        print 'udr', disk_read_anomalyLikelihood, udr

        disk = psutil.disk_io_counters(perdisk=False).write_bytes
        write_bytes = float(disk) / 1073741824
        result_disk = model_disk.run({"diskwritebytes": write_bytes})

        disk_prediction = result_disk.inferences["multiStepBestPredictions"][1]
        disk_anomalyScore = result_disk.inferences['anomalyScore']
        disk_anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
            write_bytes, disk_anomalyScore, timestamp)
        print 'disk_anomalyLikelihood: ', disk_anomalyLikelihood, write_bytes
        udw = (disk_anomalyLikelihood * write_bytes +
               disk_anomalyLikelihood * disk_prediction) / (
                   disk_anomalyLikelihood + disk_anomalyLikelihood)

        print 'Udw', disk_anomalyLikelihood, udw

        mem = psutil.virtual_memory().percent
        mem_used = float(mem)
        result_mem = model_mem.run({"mem_active": mem_used})

        mem_prediction = result_mem.inferences["multiStepBestPredictions"][1]
        mem_anomalyScore = result_mem.inferences['anomalyScore']
        mem_anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
            mem_used, mem_anomalyScore, timestamp)
        print 'mem_anomalyLikelihood: ', mem_anomalyLikelihood, mem_used
        print 'Um', mem_anomalyLikelihood * mem_used

        um = (mem_anomalyLikelihood * mem_used + mem_anomalyLikelihood *
              mem_prediction) / (mem_anomalyLikelihood + mem_anomalyLikelihood)
        #timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
        cpu1 = psutil.cpu_percent(interval=1)
        cpu = float(cpu1)
        result = model.run({"cpu": cpu})

        prediction = result.inferences["multiStepBestPredictions"][1]
        anomalyScore = result.inferences['anomalyScore']
        anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
            cpu, anomalyScore, timestamp)
        print 'cpu anomalyLikelihood: ', anomalyLikelihood, cpu
        print 'Uc', anomalyLikelihood * cpu
        uc = (anomalyLikelihood * cpu + prediction * anomalyLikelihood) / (
            anomalyLikelihood + anomalyLikelihood)
        print 'uc: ', uc
        totalUM = (uc+um+udr+udw+udnet)/(anomalyLikelihood + mem_anomalyLikelihood + disk_anomalyLikelihood \
          + disk_read_anomalyLikelihood+net_s_anomalyLikelihood)
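        # Note: since the same likelihood value appears in both numerator and
        # denominator, each weighted term above (uc, um, udw, udr, udnet)
        # reduces to the plain mean of the observed value and its prediction,
        # e.g. uc == (cpu + prediction) / 2.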

        if anomalyScore > 0.75:
            print "anomalyScore is high: ", 'anomalyScore: ', str(
                anomalyScore
            ), 'anomalyLikelihood: ', anomalyLikelihood, " CPU@: ", cpu, " steps: ", str(
                adapter)
            adapter = adapter + 20
            if adapter >= 300:
                run_adaptation_strategy_service(attempt, cpu,
                                                anomalyLikelihood)

                attempt += 1
                adapter = 0
                print "reset timer for new adaptation action"
        else:
            print "anomalyScore is high: ", 'anomalyScore: ', str(
                anomalyScore
            ), 'anomalyLikelihood: ', anomalyLikelihood, " CPU@: ", cpu, " steps: ", str(
                adapter)
            run_adaptation_strategy_service(attempt, cpu, anomalyLikelihood)
            attempt += 1

        try:
            plt.pause(SECONDS_PER_STEP)
        except:
            pass
        writer.writerow([timestamp, cpu, prediction, anomalyScore, anomalyLikelihood,uc,\
         mem_used, mem_prediction, mem_anomalyScore, mem_anomalyLikelihood, um,\
         write_bytes, disk_prediction, disk_anomalyScore, disk_anomalyLikelihood,udw,\
         disk_read_bytes, disk_read_prediction, disk_read_anomalyScore, disk_read_anomalyLikelihood,udr,\
         net_s, net_s_prediction, net_s_anomalyScore, net_s_anomalyLikelihood, udnet,\
         totalUM])
        row += 1
    fileHandle.close()
def main():
    # Make the layout less cluttered and allow bulk-changes to text formatting
    def Txt(text, **kwargs):
        return (sg.Text(text, font=('Helvetica 8'), **kwargs))

    # Update a Text Element
    def Txt_Update(window, key, value):
        window.FindElement(key).Update(value)

    # ----------------  Create Window  ----------------
    sg.ChangeLookAndFeel('Black')
    sg.SetOptions(element_padding=(0, 0), margins=(1, 1), border_width=0)

    def GraphColumn(name, key):
        return sg.Column(
            [
                [
                    Txt(name, key=key + 'TXT_'),
                ],
                [
                    sg.Graph(
                        (GRAPH_WIDTH, GRAPH_HEIGHT),
                        (0, 0),
                        (GRAPH_WIDTH, 100),
                        background_color='black',
                        key=key + 'GRAPH_',
                    )
                ],
            ],
            pad=(2, 2),
        )

    layout = [
        [
            sg.Text('System Status Dashboard' + ' ' * 18),
            sg.Button('',
                      image_data=red_x,
                      button_color=('black', 'black'),
                      key='Exit',
                      tooltip='Closes window')
        ],
        [
            GraphColumn('Net Out', '_NET_OUT_'),
            GraphColumn('Net In', '_NET_IN_')
        ],
        [
            GraphColumn('Disk Read', '_DISK_READ_'),
            GraphColumn('Disk Write', '_DISK_WRITE_')
        ],
        [
            GraphColumn('CPU Usage', '_CPU_'),
            GraphColumn('Memory Usage', '_MEM_')
        ],
    ]

    window = sg.Window(
        'PSG System Dashboard',
        keep_on_top=True,
        auto_size_buttons=False,
        grab_anywhere=True,
        no_titlebar=True,
        default_button_element_size=(12, 1),
        return_keyboard_events=True,
        alpha_channel=ALPHA,
        use_default_focus=False,
    ).Layout(layout).Finalize()

    # setup graphs & initial values
    netio = psutil.net_io_counters()
    net_graph_in = DashGraph(window.FindElement('_NET_IN_GRAPH_'),
                             netio.bytes_recv, '#23a0a0')
    net_graph_out = DashGraph(window.FindElement('_NET_OUT_GRAPH_'),
                              netio.bytes_sent, '#56d856')

    diskio = psutil.disk_io_counters()
    disk_graph_write = DashGraph(window.FindElement('_DISK_WRITE_GRAPH_'),
                                 diskio.write_bytes, '#be45be')
    disk_graph_read = DashGraph(window.FindElement('_DISK_READ_GRAPH_'),
                                diskio.read_bytes, '#5681d8')

    cpu_usage_graph = DashGraph(window.FindElement('_CPU_GRAPH_'), 0,
                                '#d34545')
    mem_usage_graph = DashGraph(window.FindElement('_MEM_GRAPH_'), 0,
                                '#BE7C29')

    print(psutil.cpu_percent(percpu=True))
    # ----------------  main loop  ----------------
    while (True):
        # --------- Read and update window once a second--------
        event, values = window.Read(timeout=1000)
        if event in (
                None, 'Exit'
        ):  # Be nice and give an exit, especially since there is no titlebar
            break
        # ----- Network Graphs -----
        netio = psutil.net_io_counters()
        write_bytes = net_graph_out.graph_value(netio.bytes_sent)
        read_bytes = net_graph_in.graph_value(netio.bytes_recv)
        Txt_Update(window, '_NET_OUT_TXT_',
                   'Net out {}'.format(human_size(write_bytes)))
        Txt_Update(window, '_NET_IN_TXT_',
                   'Net In {}'.format(human_size(read_bytes)))
        # ----- Disk Graphs -----
        diskio = psutil.disk_io_counters()
        write_bytes = disk_graph_write.graph_value(diskio.write_bytes)
        read_bytes = disk_graph_read.graph_value(diskio.read_bytes)
        Txt_Update(window, '_DISK_WRITE_TXT_',
                   'Disk Write {}'.format(human_size(write_bytes)))
        Txt_Update(window, '_DISK_READ_TXT_',
                   'Disk Read {}'.format(human_size(read_bytes)))
        # ----- CPU Graph -----
        cpu = psutil.cpu_percent(0)
        cpu_usage_graph.graph_percentage_abs(cpu)
        Txt_Update(window, '_CPU_TXT_', '{0:2.0f}% CPU Used'.format(cpu))
        # ----- Memory Graph -----
        mem_used = psutil.virtual_memory().percent
        mem_usage_graph.graph_percentage_abs(mem_used)
        Txt_Update(window, '_MEM_TXT_', '{}% Memory Used'.format(mem_used))
Example #52
0
    def update(self):
        """Update disk I/O stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        if self.input_method == 'local':
            # Update stats using the standard system lib
            # Grab the stat using the psutil disk_io_counters method
            # read_count: number of reads
            # write_count: number of writes
            # read_bytes: number of bytes read
            # write_bytes: number of bytes written
            # read_time: time spent reading from disk (in milliseconds)
            # write_time: time spent writing to disk (in milliseconds)
            try:
                diskiocounters = psutil.disk_io_counters(perdisk=True)
            except Exception:
                return stats

            # Previous disk IO stats are stored in the diskio_old variable
            if not hasattr(self, 'diskio_old'):
                # First call, we init the diskio_old var
                try:
                    self.diskio_old = diskiocounters
                except (IOError, UnboundLocalError):
                    pass
            else:
                # By storing time data we enable Rx/s and Tx/s calculations in the
                # XML/RPC API, which would otherwise be overly difficult work
                # for users of the API
                time_since_update = getTimeSinceLastUpdate('disk')

                diskio_new = diskiocounters
                for disk in diskio_new:
                    # By default, RamFS is not displayed (issue #714)
                    if self.args is not None and not self.args.diskio_show_ramfs and disk.startswith(
                            'ram'):
                        continue

                    # Do not take hidden disks into account
                    if self.is_hide(disk):
                        continue

                    # Compute count and bit rate
                    try:
                        read_count = (diskio_new[disk].read_count -
                                      self.diskio_old[disk].read_count)
                        write_count = (diskio_new[disk].write_count -
                                       self.diskio_old[disk].write_count)
                        read_bytes = (diskio_new[disk].read_bytes -
                                      self.diskio_old[disk].read_bytes)
                        write_bytes = (diskio_new[disk].write_bytes -
                                       self.diskio_old[disk].write_bytes)
                        diskstat = {
                            'time_since_update': time_since_update,
                            'disk_name': n(disk),
                            'read_count': read_count,
                            'write_count': write_count,
                            'read_bytes': read_bytes,
                            'write_bytes': write_bytes
                        }
                        # Add alias if it exists (defined in the configuration file)
                        if self.has_alias(disk) is not None:
                            diskstat['alias'] = self.has_alias(disk)
                    except KeyError:
                        continue
                    else:
                        diskstat['key'] = self.get_key()
                        stats.append(diskstat)

                # Save stats to compute next bitrate
                self.diskio_old = diskio_new
        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # No standard way for the moment...
            pass

        # Update the stats
        self.stats = stats

        return self.stats
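
The same diff-and-divide approach works outside Glances: snapshot psutil.disk_io_counters(perdisk=True) twice and divide the byte deltas by the elapsed time. A minimal standalone sketch (not Glances code; the helper name and the one-second default interval are illustrative):

import time
import psutil

def disk_rates(interval=1.0):
    """Return {disk: (read_bytes_per_sec, write_bytes_per_sec)} measured over `interval` seconds."""
    before = psutil.disk_io_counters(perdisk=True)
    time.sleep(interval)
    after = psutil.disk_io_counters(perdisk=True)
    rates = {}
    for name, new in after.items():
        old = before.get(name)
        if old is None:  # disk appeared between the two snapshots
            continue
        rates[name] = ((new.read_bytes - old.read_bytes) / interval,
                       (new.write_bytes - old.write_bytes) / interval)
    return rates

print(disk_rates())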
Example #53
0
 def poll(self):
     super(AgentDiskWriteBytesSystemStats, self).poll()
     disk_io_info = psutil.disk_io_counters()
     self._get_value(disk_io_info.write_bytes)
Example #54
0
def diskTabUpdate(self):
    disktemp = ps.disk_io_counters(perdisk=True)
    self.diskt2 = time()  ##
    timediskDiff = self.diskt2 - self.diskt1
    self.diskstate2 = []
    for i in range(0, self.numOfDisks):
        try:
            self.diskstate2.append(disktemp[self.disklist[i]])
            for j, part in enumerate(self.diskPartitions[i]):
                temp = ps.disk_usage(part[1])
                self.diskListStores[i].set(
                    self.diskListStoreItrs[i][j], 3,
                    byte_to_human(temp[0], persec=False), 4,
                    byte_to_human(temp[1], persec=False), 5, temp[3])
        except Exception as e:
            print(f"error in diskliststore: {e}")

    self.diskDiff, self.diskActiveString = [], []
    for i in range(0, self.numOfDisks):
        try:
            self.diskDiff.append([
                x2 - x1
                for x1, x2 in zip(self.diskstate1[i], self.diskstate2[i])
            ])
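            # Index 8 of the per-disk counters tuple is busy_time in milliseconds
            # (on Linux), so delta / (10 * elapsed seconds) gives a percentage.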

            self.diskActiveString.append(
                f'{int(self.diskDiff[i][8]/(10*timediskDiff))}%')

            self.diskWidgetList[i].diskactivelabelvalue.set_text(
                self.diskActiveString[i])
            self.diskWidgetList[i].diskreadlabelvalue.set_text(
                "{:.1f} MiB/s".format(self.diskDiff[i][2] /
                                      (timediskDiff * 1048576)))
            self.diskWidgetList[i].diskwritelabelvalue.set_text(
                "{:.1f} MiB/s".format(self.diskDiff[i][3] /
                                      (timediskDiff * 1048576)))

            if self.update_graph_direction:
                self.diskActiveArray[i].pop(0)
                self.diskActiveArray[i].append(
                    (self.diskDiff[i][8]) / (10 * timediskDiff))  ##

                self.diskReadArray[i].pop(0)
                self.diskReadArray[i].append(self.diskDiff[i][2] /
                                             (timediskDiff * 1048576))

                self.diskWriteArray[i].pop(0)
                self.diskWriteArray[i].append(self.diskDiff[i][3] /
                                              (timediskDiff * 1048576))
            else:
                self.diskActiveArray[i].pop()
                self.diskActiveArray[i].insert(0, (self.diskDiff[i][8]) /
                                               (10 * timediskDiff))  ##

                self.diskReadArray[i].pop()
                self.diskReadArray[i].insert(
                    0, self.diskDiff[i][2] / ((timediskDiff) * 1048576))

                self.diskWriteArray[i].pop()
                self.diskWriteArray[i].insert(
                    0, self.diskDiff[i][3] / ((timediskDiff) * 1048576))

                self.diskWidgetList[i].givedata(self, i)
        except Exception as e:
            print(f'error in disk update: {e}')

    self.diskstate1 = self.diskstate2
    #print(self.diskt2-self.diskt1)
    self.diskt1 = self.diskt2
Example #55
0
# Import psutil python3 module for checking CPU usage as well as the I/O and network bandwidth.
import psutil
psutil.cpu_percent()

'''
This shows that overall CPU utilization is low. The machine has many cores, so one fully loaded thread/virtual core only accounts for about 1.2% of the total load, and the script uses just one core regardless of how many are available.

After checking CPU utilization, you notice it is nowhere near the limit for the machine as a whole.

Looking at per-core usage, the script runs on a single core only; because the server has many cores, that one core can be saturated (the task is CPU-bound on it) while total utilization still looks low.

Now, psutil.disk_io_counters() and psutil.net_io_counters() give you bytes read and bytes written for disk I/O, and bytes received and bytes sent for network I/O bandwidth. To check disk I/O, use the following call:
'''

psutil.disk_io_counters()

# For checking the network I/O bandwidth:
psutil.net_io_counters()
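
# Raw counters are totals since boot; to gauge current activity, take two
# samples and divide the deltas by the elapsed time. A minimal sketch of that
# diagnosis step (the one-second window and the printed labels are
# illustrative, not part of the original exercise):
import time

_, disk0, net0 = psutil.cpu_percent(percpu=True), psutil.disk_io_counters(), psutil.net_io_counters()
time.sleep(1)
per_core, disk1, net1 = psutil.cpu_percent(percpu=True), psutil.disk_io_counters(), psutil.net_io_counters()

print("busiest core: {:.1f}%".format(max(per_core)))
print("disk read : {:.1f} KiB/s".format((disk1.read_bytes - disk0.read_bytes) / 1024.0))
print("disk write: {:.1f} KiB/s".format((disk1.write_bytes - disk0.write_bytes) / 1024.0))
print("net sent  : {:.1f} KiB/s".format((net1.bytes_sent - net0.bytes_sent) / 1024.0))
print("net recv  : {:.1f} KiB/s".format((net1.bytes_recv - net0.bytes_recv) / 1024.0))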


# Basic rsync command:
#   rsync [Options] [Source-Files-Dir] [Destination]
#
#   Option    Use
#   -v        Verbose output
#   -q        Suppress message output
#   -a
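
# If you want to drive rsync from Python instead of the shell, subprocess
# works. A hedged sketch, assuming rsync is installed; "src/" and the
# destination below are placeholder paths, not part of the original notes:
import subprocess

# -a = archive mode, -q = quiet; check=True raises CalledProcessError on failure
subprocess.run(["rsync", "-aq", "src/", "user@backup:/srv/backup/"], check=True)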
Example #56
0
 def poll(self):
     super(AgentDiskReadByesSystemStats, self).poll()
     disk_io_info = psutil.disk_io_counters()
     self._get_value(disk_io_info.read_bytes)
Example #57
0
    def GeneratePsUtilStatsProto():
        sample_period = 1.0

        result = psutil_stats_pb2.PsUtilStats()

        # Time to begin collection
        PsUtilLogger._PopulateTimestamp(result.collect_begin_time)

        cpu_times_fields = [
            "user", "system", "idle", "nice", "iowait", "irq", "softirq",
            "steal", "guest", "guest_nice", "interrupt", "dpc"
        ]

        # cpu_times
        PsUtilLogger._CopyFieldsIfExist(result.cpu_times,
                                        psutil.cpu_times(percpu=False),
                                        cpu_times_fields)

        # cpu_times_per_cpu
        for per_cpu_cpu_times in psutil.cpu_times(percpu=True):
            PsUtilLogger._CopyFieldsIfExist(result.cpu_times_per_cpu.add(),
                                            per_cpu_cpu_times,
                                            cpu_times_fields)

        # cpu_percent
        result.cpu_percent_interval = sample_period
        result.cpu_percent = psutil.cpu_percent(
            interval=result.cpu_percent_interval, percpu=False)

        # cpu_percent_per_cpu
        result.cpu_percent_per_cpu_interval = sample_period
        result.cpu_percent_per_cpu.extend(
            psutil.cpu_percent(interval=result.cpu_percent_per_cpu_interval,
                               percpu=True))

        cpu_times_percent_fields = [
            "user", "system", "idle", "nice", "iowait", "irq", "softirq",
            "steal", "guest", "guest_nice", "interrupt", "dpc"
        ]

        # cpu_times_percent
        result.cpu_times_percent_interval = sample_period
        PsUtilLogger._CopyFieldsIfExist(
            result.cpu_times_percent,
            psutil.cpu_times_percent(
                percpu=False, interval=result.cpu_times_percent_interval),
            cpu_times_percent_fields)

        # cpu_times_percent_per_cpu
        result.cpu_times_percent_per_cpu_interval = sample_period
        for per_cpu_cpu_times_percent in psutil.cpu_times_percent(
                percpu=True,
                interval=result.cpu_times_percent_per_cpu_interval):
            PsUtilLogger._CopyFieldsIfExist(
                result.cpu_times_percent_per_cpu.add(),
                per_cpu_cpu_times_percent, cpu_times_percent_fields)

        # cpu_count_physical
        result.cpu_count_physical = psutil.cpu_count(logical=False)

        # cpu_count_logical
        result.cpu_count_logical = psutil.cpu_count(logical=True)

        # cpu_stats
        PsUtilLogger._CopyFieldsIfExist(
            result.cpu_stats, psutil.cpu_stats(),
            ["ctx_switches", "interrupts", "soft_interrupts", "syscalls"])

        cpu_freq_fields_map = {
            "current_val": "current",
            "min_val": "min",
            "max_val": "max"
        }

        # cpu_freq
        PsUtilLogger._CopyFieldsIfExist(result.cpu_freq,
                                        psutil.cpu_freq(percpu=False),
                                        cpu_freq_fields_map)

        # cpu_freq_per_cpu
        for per_cpu_cpu_freq in psutil.cpu_freq(percpu=True):
            PsUtilLogger._CopyFieldsIfExist(result.cpu_freq_per_cpu.add(),
                                            per_cpu_cpu_freq,
                                            cpu_freq_fields_map)

        # virtual_memory
        virtual_memory_fields = [
            "total", "available", "used", "free", "active", "inactive",
            "buffers", "cached", "shared", "slab", "wired"
        ]
        PsUtilLogger._CopyFieldsIfExist(result.virtual_memory,
                                        psutil.virtual_memory(),
                                        virtual_memory_fields)

        # swap_memory
        swap_memory_fields = [
            "total", "used", "free", "percent", "sin", "sout"
        ]
        PsUtilLogger._CopyFieldsIfExist(result.swap_memory,
                                        psutil.swap_memory(),
                                        swap_memory_fields)

        disk_io_counters_fields = [
            "read_count", "write_count", "read_bytes", "write_bytes",
            "read_time", "write_time", "busy_time", "read_merged_count",
            "write_merged_count"
        ]

        # disk_io_counters
        PsUtilLogger._CopyFieldsIfExist(result.disk_io_counters,
                                        psutil.disk_io_counters(perdisk=False),
                                        disk_io_counters_fields)

        # disk_io_counters_per_disk
        for disk_name, per_disk_disk_io_counters in (psutil.disk_io_counters(
                perdisk=True).items()):
            PsUtilLogger._CopyFieldsIfExist(
                result.disk_io_counters_per_disk[disk_name],
                per_disk_disk_io_counters, disk_io_counters_fields)

        net_io_counters_fields = [
            "bytes_sent", "bytes_recv", "packets_sent", "packets_recv",
            "errin", "errout", "dropin", "dropout"
        ]

        # net_io_counters
        PsUtilLogger._CopyFieldsIfExist(result.net_io_counters,
                                        psutil.net_io_counters(pernic=False),
                                        net_io_counters_fields)

        # net_io_counters_per_nic
        for net_name, per_nic_net_io_counters in (psutil.net_io_counters(
                pernic=True).items()):
            PsUtilLogger._CopyFieldsIfExist(
                result.net_io_counters_per_nic[net_name],
                per_nic_net_io_counters, net_io_counters_fields)

        # boot_time
        PsUtilLogger._PopulateTimestamp(result.boot_time, psutil.boot_time())

        # Time to end collection
        PsUtilLogger._PopulateTimestamp(result.collect_end_time)
        return result
Example #58
0
def procUpdate(self):
    pids=ps.pids()
    # new process appending
    for pi in pids:
        if pi not in self.processList:  # and pi>self.systemdId changed for multiple users
            # print('my process')
            try:
                proc=ps.Process(pi)
                if '/libexec/' not in "".join(proc.cmdline()) and 'daemon' not in "".join(proc.cmdline()) and 'dbus' not in "".join(proc.cmdline()) :
                    parents_processess=proc.parents()
                    for systemdproc in self.processSystemd:
                        if systemdproc in parents_processess:
                            for parent in parents_processess:
                                cpu_percent=proc.cpu_percent()/ps.cpu_count()
                                cpu_percent="{:.1f}".format(cpu_percent)+' %'

                                mem_info=proc.memory_info()
                                rss='{:.1f}'.format(mem_info[0]/mibdevider)+' MiB'
                                shared='{:.1f}'.format(mem_info[2]/mibdevider)+' MiB'
                                mem_util=(mem_info[0]-mem_info[2])/mibdevider
                                mem_util='{:.1f}'.format(mem_util)+' MiB'

                                if parent.pid in self.processList:
                                    itr=self.processTreeStore.append(self.processTreeIterList[parent.pid],[proc.pid,proc.name(),
                                    cpu_percent,cpu_percent,mem_util,mem_util,'0 KB/s','0 KB/s','0 KB/s','0 KB/s',rss,shared,proc.username()
                                    ," ".join(proc.cmdline()),icon_finder(proc)])
                                    self.processTreeIterList[pi]=itr
                                    self.processList[pi]=proc
                                    self.processChildList[parent.pid].append(pi)
                                    self.processChildList[pi]=[]

                                    self.procDiskprev[pi]=[0,0]     ##
                                    self.procT1[pi]=0
                                    print('appending',pi)
                                    break
                                elif '/libexec/' not in "".join(parent.cmdline()) and 'daemon' not in "".join(parent.cmdline()) and 'dbus' not in "".join(parent.cmdline()):
                                    itr=self.processTreeStore.append(None,[proc.pid,proc.name(),
                                    cpu_percent,cpu_percent,mem_util,mem_util,'0 KB/s','0 KB/s','0 KB/s','0 KB/s',rss,shared,proc.username()
                                    ," ".join(proc.cmdline()),icon_finder(proc)])
                                    self.processTreeIterList[pi]=itr
                                    self.processList[pi]=proc
                                    self.processChildList[pi]=[]

                                    self.procDiskprev[pi]=[0,0]  ##
                                    self.procT1[pi]=0
                                    print('appending',pi)
                                    break
                            break
            except:
                print('some error in appending')

    # updating 
    tempdi=self.processList.copy()
    for pidds in reversed(tempdi):
        itr=self.processTreeIterList[pidds]
        try:
            if pidds not in pids:
                # childremover(self,pidds)
                self.processTreeStore.remove(itr)
                self.processList.pop(pidds)
                self.processTreeIterList.pop(pidds)
                tempchi=self.processChildList.copy()

                self.procDiskprev.pop(pidds)
                for key in tempchi:
                    if key==pidds:
                        self.processChildList.pop(pidds)
                    else:
                        if pidds in self.processChildList[key]:
                            self.processChildList[key].remove(pidds)
                            
                print('popping',pidds)
            else:
                cpu_percent=self.processList[pidds].cpu_percent()/ps.cpu_count()
                cpu_percent="{:.1f}".format(cpu_percent)+' %'

                mem_info=self.processList[pidds].memory_info()
                rss='{:.1f}'.format(mem_info[0]/mibdevider)+' MiB'
                shared='{:.1f}'.format(mem_info[2]/mibdevider)+' MiB'
                mem_util=(mem_info[0]-mem_info[2])/mibdevider
                mem_util='{:.1f}'.format(mem_util)+' MiB'
                # prev=float(self.processTreeStore.get_value(self.processTreeIterList[pidds],6)[:-5])
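                # Process.io_counters() returns (read_count, write_count,
                # read_bytes, write_bytes, ...); slice [2:4] keeps the byte counters.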
                
                currArray=self.processList[pidds].io_counters()[2:4]
                procT2=time.time()
                wspeed=(currArray[1]-self.procDiskprev[pidds][1])/(procT2-self.procT1[pidds])
                wspeed=byte_to_human(wspeed)
                rspeed=(currArray[0]-self.procDiskprev[pidds][0])/(procT2-self.procT1[pidds])
                rspeed=byte_to_human(rspeed)

                self.processTreeStore.set(itr,2,cpu_percent,3,cpu_percent,4,mem_util,5,mem_util,6,rspeed,7,rspeed,8,wspeed,9,wspeed,10,rss,11,shared)

                self.procDiskprev[pidds]=currArray[:]
                self.procT1[pidds]=procT2
        except:
            print('error in process updating')

    # print(self.processChildList)
    #recursive calculations
    for pid in reversed(self.processChildList):
        # print(pid)
        rcpu_percent=0
        rmem_util=0
        for childId in self.processChildList[pid]:
            cpu_percent=self.processTreeStore.get_value(self.processTreeIterList[childId],2)
            cpu_percent=float(cpu_percent[:-2])
            mem_util=self.processTreeStore.get_value(self.processTreeIterList[childId],4)
            mem_util=float(mem_util[:-3])
            # self.processTreeStore.set(self.processTreeIterList[childId],2,cpu_percent)
            rcpu_percent+=cpu_percent
            rmem_util+=mem_util
        if pid in self.processTreeIterList:
            cpu_percent=self.processTreeStore.get_value(self.processTreeIterList[pid],3)
            cpu_percent=float(cpu_percent[:-2])
            self.processTreeStore.set(self.processTreeIterList[pid],2,"{:.1f}".format(rcpu_percent+cpu_percent)+' %')
            mem_util=self.processTreeStore.get_value(self.processTreeIterList[pid],5)
            mem_util=float(mem_util[:-3])
            self.processTreeStore.set(self.processTreeIterList[pid],4,"{:.1f}".format(rmem_util+mem_util)+' MiB')

    self.column_header_labels[3].set_text('{0} %\nCPU'.format(self.cpuUtil))
    self.column_header_labels[2].set_text('{0} %\nrCPU'.format(self.cpuUtil))
    self.column_header_labels[4].set_text('{0} %\nrMemory'.format(self.memPercent))
    self.column_header_labels[5].set_text('{0} %\nMemory'.format(self.memPercent))
    
    ## Total disk io for all disks
    diskio=ps.disk_io_counters()
    diskTotalT2=time.time()
    totalrspeed=(diskio[2]-self.diskTotalState1[0])/(diskTotalT2-self.diskTotalT1)
    totalwspeed=(diskio[3]-self.diskTotalState1[1])/(diskTotalT2-self.diskTotalT1)

    self.column_header_labels[7].set_text('{0}\nDiskRead'.format(byte_to_human(totalrspeed)))
    self.column_header_labels[9].set_text('{0}\nDiskWrite'.format(byte_to_human(totalwspeed)))

    self.diskTotalState1=diskio[2:4]
    self.diskTotalT1=diskTotalT2

    return True

    
                
Example #59
0
ps.cpu_times()
ps.cpu_times(percpu=True)   # CPU times per logical CPU
ps.cpu_times().user         # user CPU time
ps.cpu_count()              # number of CPU cores

# Memory information
ps.virtual_memory()         # all memory information
ps.virtual_memory().total   # total memory
ps.virtual_memory().used    # used memory
ps.virtual_memory().free    # free memory

# Swap memory information
ps.swap_memory()

# Disk information
ps.disk_io_counters()       # disk I/O counters
ps.disk_partitions()        # full partition information
ps.disk_usage("/")          # usage statistics for a given mount point
ps.disk_io_counters(perdisk=True)   # I/O counters per disk

# Network information
ps.net_io_counters()        # total network I/O counters
ps.net_io_counters(pernic=True)     # counters per interface

# Login info for currently logged-in users
ps.users()

# Boot time
ps.boot_time()

# System process management
Example #60
-1
def main():
    try:
        cpus = psutil.cpu_percent(interval=0, percpu=True)
        header = []
        cpu_sum = []
        for i in range(0, len(cpus)):
            header.append('Core-' + str(i) + '_usage_percent')

        f = open(options.filename, 'w')
        f.write('Total_CPU_usage_percent,Memory_usage_percent,Throughput_in_Mbps,Throughput_out_Mbps,Total_CPU_usage_iowait_percent' + ',' + ",".join(header) + "," + "Read_speed_MBps" + "," + "Write_speed_MBps" + "," + "Avg_CPU_usage" + '\n')
        interval = 1
        bytes_received_prev, bytes_sent_prev = get_network_bytes(options.interface)
        bytes_read_prev = psutil.disk_io_counters(perdisk=False).read_bytes
        bytes_written_prev = psutil.disk_io_counters(perdisk=False).write_bytes
        prev_time = time.time()
        time.sleep(interval)

        while 1:
            cpu = str(psutil.cpu_percent(interval=0))
            mem = str(psutil.virtual_memory().percent)
            cpus = psutil.cpu_percent(interval=0, percpu=True)
            cpu_iowait = str(psutil.cpu_times_percent(interval=0, percpu=False).iowait)
            bytes_received_curr, bytes_sent_curr = get_network_bytes(options.interface)
            bytes_read_curr = psutil.disk_io_counters(perdisk=False).read_bytes
            bytes_written_curr = psutil.disk_io_counters(perdisk=False).write_bytes

            # calculate elapsed time
            curr_time = time.time()
            elapsed_time = curr_time - prev_time
            prev_time = curr_time

            # calculate network throughput (Mbps)
            throughput_in = (((bytes_received_curr - bytes_received_prev) * 8.0) / elapsed_time) / 1000000.0
            throughput_out = (((bytes_sent_curr - bytes_sent_prev) * 8.0) / elapsed_time) / 1000000.0

            # calculate disk throughput (MBps)
            disk_io_read = (float(bytes_read_curr - bytes_read_prev) / (1024.0 * 1024.0)) / elapsed_time
            disk_io_write = (float(bytes_written_curr - bytes_written_prev) / (1024.0 * 1024.0)) / elapsed_time

            # update variables
            bytes_received_prev = bytes_received_curr
            bytes_sent_prev = bytes_sent_curr
            bytes_read_prev = bytes_read_curr
            bytes_written_prev = bytes_written_curr

            # round throughputs to 5 decimals
            incoming = str(round(throughput_in, 5))
            outgoing = str(round(throughput_out, 5))
            read = str(round(disk_io_read, 5))
            write = str(round(disk_io_write, 5))
            cpu_sum.append(float(cpu))
            cpu_avg = str(round(sum(cpu_sum) / float(len(cpu_sum)), 5))

            # append record to csv file
            f.write(cpu + ',' + mem + ',' + incoming + ',' + outgoing + ',' + cpu_iowait + ',' + ",".join(map(str, cpus)) + "," + read + "," + write + "," + cpu_avg + '\n')
            f.flush()
            time.sleep(interval)
    except (KeyboardInterrupt, SystemExit):
        print "Exiting."
        pass