def _crawl_load(self):

        assert(self.crawl_mode is not Modes.OUTCONTAINER)

        logger.debug('Crawling system load')
        feature_key = 'load'

        try:
            shortterm = os.getloadavg()[0]
        except Exception as e:
            shortterm = 'unknown'
        try:
            midterm = os.getloadavg()[1]
        except Exception as e:
            midterm = 'unknown'
        try:
            longterm = os.getloadavg()[2]
        except Exception as e:
            longterm = 'unknown'

        feature_attributes = LoadFeature(shortterm, midterm, longterm)

        try:
            yield (feature_key, feature_attributes)
        except Exception as e:
            logger.error('Error crawling load', exc_info=True)
            raise CrawlError(e)
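
os.getloadavg() is Unix-only and raises OSError when the load average cannot be obtained, which is why each of the three values above is guarded individually. A minimal sketch of an equivalent single-call fallback that could feed the same LoadFeature (the helper name _read_loadavg is hypothetical):

import os

def _read_loadavg():
    # One call yields all three averages; if it fails, it fails for all of them.
    try:
        shortterm, midterm, longterm = os.getloadavg()
    except (AttributeError, OSError):
        # getloadavg() is missing on Windows and raises OSError when unobtainable.
        shortterm = midterm = longterm = 'unknown'
    return shortterm, midterm, longterm
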
    def collect_job():
        config = utils.get_config()
        disks = config[utils.DISK_SECTION]
        interfaces = config[utils.INET_SECTION]
        account = Account(config[utils.GENERAL_SECTION].get('email'),
                          config[utils.GENERAL_SECTION].get('user_key'),
                          config[utils.GENERAL_SECTION].get('api_key'))

        report = {}
        usage = {}
        net = {}

        if os.name == 'nt':
            report['os'] = platform.system()+"-"+platform.win32_ver()[0]+" "+platform.win32_ver()[2]
            report['arch'] = platform.architecture()[0]
        else:
            report['loadAverage'] = {}
            if not os.name == 'nt':
                for idx, la in enumerate(os.getloadavg()):
                    time_la = "1" if idx == 0 else "5" if idx == 2 else "15"
                    report['loadAverage'][time_la] = "{0:.2f}".format(la)
            if platform.system() == 'Linux':
                report['os'] = platform.linux_distribution()[0]+"-"+platform.linux_distribution()[1]+" "+platform.linux_distribution()[2]
                report['arch'] = platform.architecture()[0]
            else:
                report['os'] = "Mac OS X - "+platform.mac_ver()[0]
                report['arch'] = platform.architecture()[0]

        for disk in disks.keys():
            if disks[disk] == utils.ENABLED and check_disk(disk):
                usage_temp = psutil.disk_usage(disk)
                usage[disk] = {'total': usage_temp.total, 'used': usage_temp.used, 'free': usage_temp.free,
                               'percentage': usage_temp.percent}
        for interf in interfaces.keys():
                if interfaces[interf] == utils.ENABLED:
                    net_temp = dict((k.lower(),v) for k, v in psutil.net_io_counters(pernic=True).iteritems())[interf]
                    net[interf] = {'sent': net_temp.bytes_sent, 'recv': net_temp.bytes_recv}
        report['inet'] = net
        report['disks'] = usage
        report['processes'] = {'value': len(psutil.pids())}

        report['loadAverage'] = {}
        if not os.name == 'nt':
            for idx, la in enumerate(os.getloadavg()):
                time_la = "1" if idx == 0 else "5" if idx == 2 else "15"
                report['loadAverage'][time_la] = "{0:.2f}".format(la)
        report['users'] = {'value': len(psutil.users())}
        report['uptime'] = str(datetime.now() - datetime.fromtimestamp(psutil.boot_time())).split('.')[0]
        report['kindDevice'] = 3

        api_key = account.api_key
        url = "%s/%s" % (system_config['ROUTES'].get('collect'), config[utils.GENERAL_SECTION].get('serial'))

        params = {'apiKey': api_key, 'data': json.dumps(report)}

        try:
            response = http.request('POST', url, params, {'user-key': account.user_key}, encode_multipart=False)
        except Exception, e:
            console.error("Check your connection")
            return
Example #3
 def _throttle_if_overloaded(self):
     global interrupted
     if interrupted or got_sighup:
         return
     if self._config["slave-load-max"] is None:
         return
     load_max = float(self._config["slave-load-max"])
     if load_max < 1.0:
         return
     if os.getloadavg()[0] <= load_max:
         return
     load_resume = max(load_max - 1.0, 0.9)
     secs = random.randrange(30, 90)
     self._slave.close()
     while True:
         load = os.getloadavg()[0]
         if load <= load_resume:
             break
         logging.info("Sleeping due to high load (%.2f)" % load)
         try:
             time.sleep(secs)
         except KeyboardInterrupt:
             interrupted = True
         if interrupted or got_sighup:
             break
         if secs < 300:
             secs += random.randrange(30, 90)
Example #4
def main():
    args = Options()
    ColorScheme(args.color)
    sections = args.section
    if len(sections) == 0:
        sections = ['header', 'hw', 'load', 'net', 'netsrv', 'security', 'agents']
    print TITLE + "=============================================================" + DEFAULT
    print('{:^61}'.format(version))
    print TITLE + "=============================================================" + DEFAULT
    for i in range(len(sections)):
        # HEADER
        if sections[i] == 'header':
            hostname = gethostname()
            row('NAME', hostname)
            now = time.strftime("%Y-%m-%d %H:%M (%Z)")
            row('DATE', now)
            loadavg = os.getloadavg()
            uptime = countuptime() + " " + str(os.getloadavg())
            row('UPTIME', uptime)
            platf = ' '.join(platform.linux_distribution())
            row('OS', platf)
            kernelv = platform.platform()
            row('KERNEL', kernelv)
        # HARDWARE
        if sections[i] == 'hw':
            title('HARDWARE')
            dmidecode()
            cpuinfo()
            meminfo()
            disk()
Example #5
def get_load():

	stattime = int(time.time())
	yaketyfile_buffer = yaketydir, str(stattime), ".", hostname, ".load"
	yaketyfile = ''.join(yaketyfile_buffer)

	rrd_type = "GAUGE"
	rrd_interval = "60"

	load1 = os.getloadavg()[0]
	load5 = os.getloadavg()[1]
	load15 = os.getloadavg()[2]

	#Write Yaketystats output
	output_load1 = "p=/", hostname, "/load/1-minute", seperator, "t=", rrd_type, seperator, "i=", str(rrd_interval), seperator, "ts=", str(stattime), seperator, "v=", str(load1), "\n"
	output_load5 = "p=/", hostname, "/load/5-minute", seperator, "t=", rrd_type, seperator, "i=", str(rrd_interval), seperator, "ts=", str(stattime), seperator, "v=", str(load5), "\n"
	output_load15 = "p=/", hostname, "/load/15-minute", seperator, "t=", rrd_type, seperator, "i=", str(rrd_interval), seperator, "ts=", str(stattime), seperator, "v=", str(load15), "\n"
	load1_buffer = ''.join(output_load1)
	load5_buffer = ''.join(output_load5)
	load15_buffer = ''.join(output_load15)

	load_outfile = open (yaketyfile, "a")
	load_outfile.write(load1_buffer)
	load_outfile.write(load5_buffer)
	load_outfile.write(load15_buffer)
	load_outfile.close()
Example #6
def processTransfer():
    try:
        conn = psycopg2.connect(dbConnectStr)
        cur = conn.cursor()
        zk = KazooClient(hosts=zkHost)
        zk.start()
        transferq = LockingQueue(zk, '/transfer/')
        while True:
            rawCode = transferq.get()
            proposal = rawCode.decode().strip()
            transferq.consume()

            # print(" proposal = {0} ".format(proposal))
            ints = datetime.now()
            inload = os.getloadavg()[0]
            pro1 = Popen(['/usr/bin/python36', './processproptran.py', proposal], stdin=None, stdout=None)
            pro1.wait()

            outts = datetime.now()
            outload = os.getloadavg()[0]
            # insert the runtime info into c*
            cluster = Cluster(cfg.cassCluster)
            session = cluster.connect(cfg.cassKeyspace)
            stmt = SimpleStatement("""insert into runstat(id,executable,ints,inload,outts,outload)
            values (%s, %s, %s, %s, %s, %s)""", consistency_level=ConsistencyLevel.ANY)
            session.execute(stmt, (uuid.uuid4(), executable, ints, inload, outts, outload))
    except psycopg2.Error as err:
        print("SQLError {0}".format(err))
    finally:
        zk.stop()
        zk.close()
        cur.close()
        conn.close()
Example #7
def benchmark_and_compare(branch, commit, last_commit, args, executableName, md5sum, compilerVersion, resultsFileName,
                          testFilePath, fileName, last_csize, last_cspeed, last_dspeed):
    sleepTime = 30
    while os.getloadavg()[0] > args.maxLoadAvg:
        log("WARNING: bench loadavg=%.2f is higher than %s, sleeping for %s seconds"
            % (os.getloadavg()[0], args.maxLoadAvg, sleepTime))
        time.sleep(sleepTime)
    start_load = str(os.getloadavg())
    result = execute('programs/%s -rqi5b1e%s %s' % (executableName, args.lastCLevel, testFilePath), print_output=True)   
    end_load = str(os.getloadavg())
    linesExpected = args.lastCLevel + 1
    if len(result) != linesExpected:
        raise RuntimeError("ERROR: number of result lines=%d is different that expected %d\n%s" % (len(result), linesExpected, '\n'.join(result)))
    with open(resultsFileName, "a") as myfile:
        myfile.write('%s %s %s md5=%s\n' % (branch, commit, compilerVersion, md5sum))
        myfile.write('\n'.join(result) + '\n')
        myfile.close()
        if (last_cspeed == None):
            log("WARNING: No data for comparison for branch=%s file=%s " % (branch, fileName))
            return ""
        commit, csize, cspeed, dspeed = get_last_results(resultsFileName)
        text = ""
        for i in range(0, min(len(cspeed), len(last_cspeed))):
            print("%s:%s -%d cSpeed=%6.2f cLast=%6.2f cDiff=%1.4f dSpeed=%6.2f dLast=%6.2f dDiff=%1.4f ratioDiff=%1.4f %s" % (branch, commit, i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], float(last_csize[i])/csize[i], fileName))
            if (cspeed[i]/last_cspeed[i] < args.lowerLimit):
                text += "WARNING: %s -%d cSpeed=%.2f cLast=%.2f cDiff=%.4f %s\n" % (executableName, i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], fileName)
            if (dspeed[i]/last_dspeed[i] < args.lowerLimit):
                text += "WARNING: %s -%d dSpeed=%.2f dLast=%.2f dDiff=%.4f %s\n" % (executableName, i+1, dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], fileName)
            if (float(last_csize[i])/csize[i] < args.ratioLimit):
                text += "WARNING: %s -%d cSize=%d last_cSize=%d diff=%.4f %s\n" % (executableName, i+1, csize[i], last_csize[i], float(last_csize[i])/csize[i], fileName)
        if text:
            text = args.message + ("\nmaxLoadAvg=%s  load average at start=%s end=%s\n%s  last_commit=%s  md5=%s\n" % (args.maxLoadAvg, start_load, end_load, compilerVersion, last_commit, md5sum)) + text
        return text
Example #8
	def event_loop(self):
		start = time.time()
		lastsave = lastmute = lastidle = lastload = start
		load = 0
		try:
			os.getloadavg()
			hasload = True
		except:
			hasload = False
		while self.running:
			now = time.time()
			if hasload and now - lastload > 1: # only fetch load when available & last fetch was >1 sec ago
				lastload = now
				load, _, _ = os.getloadavg()
			try:
				if load > 8: # if load is higher than this, skip some steps
					pass
				elif now - lastmute >= 1:
					lastmute = now
					self.mute_timeout_step()
				elif now - lastidle > 10:
					lastidle = now
					self.idle_timeout_step()
				else:
					self.console_print_step()
			except:
				self.error(traceback.format_exc())	
				
			time.sleep(max(0.1, 1 - (time.time() - start)))
Example #9
def benchmark_and_compare(branch, commit, resultsFileName, lastCLevel, testFilePath, fileName, last_cspeed, last_dspeed, lower_limit, maxLoadAvg, message):
    sleepTime = 30
    while os.getloadavg()[0] > maxLoadAvg:
        log("WARNING: bench loadavg=%.2f is higher than %s, sleeping for %s seconds" % (os.getloadavg()[0], maxLoadAvg, sleepTime))
        time.sleep(sleepTime)
    start_load = str(os.getloadavg())
    result = execute('programs/zstd -qi5b1e' + str(lastCLevel) + ' ' + testFilePath)
    end_load = str(os.getloadavg())
    linesExpected = lastCLevel + 2;
    if len(result) != linesExpected:
        log("ERROR: number of result lines=%d is different that expected %d" % (len(result), linesExpected))
        return ""
    with open(resultsFileName, "a") as myfile:
        myfile.write(branch + " " + commit + "\n")
        myfile.writelines(result)
        myfile.close()
        if (last_cspeed == None):
            return ""
        commit, cspeed, dspeed = get_last_commit(resultsFileName)
        text = ""
        for i in range(0, min(len(cspeed), len(last_cspeed))):
            if (cspeed[i]/last_cspeed[i] < lower_limit):
                text += "WARNING: File=%s level=%d cspeed=%s last=%s diff=%s\n" % (fileName, i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i])
            if (dspeed[i]/last_dspeed[i] < lower_limit):
                text += "WARNING: File=%s level=%d dspeed=%s last=%s diff=%s\n" % (fileName, i+1, dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i])
        if text:
            text = message + ("\nmaxLoadAvg=%s  load average at start=%s end=%s\n" % (maxLoadAvg, start_load, end_load)) + text
        return text
Example #10
def processSymbol():
    try:
        conn = psycopg2.connect(dbConnectStr)
        cur = conn.cursor()
        zk = KazooClient(hosts=zkHost)
        zk.start()
        symbolq = LockingQueue(zk, cfg.symbol)
        while True:
            rawCode = symbolq.get()            
            ints = datetime.now()
            inload = os.getloadavg()[0]

            symbol = rawCode.decode().split('||')[0]
            globalId = rawCode.decode().split('||')[1]
            symbolq.consume()

            alias = ''
            while not alias:
                print("loop for the alias of {0}".format(globalId))
                cur.execute("""
                select alias from player0 where "globalId" = %s
                """, [globalId])
                res = cur.fetchone()
                conn.commit()
                if res:
                    alias = res[0]

            print("process symbol:{0} alias:{1} globalId:{2}".format(symbol, alias, globalId))
            lock0 = zk.Lock(symbol, 'jg')
            with lock0:
                # the operation
                cmd = "cd /{4}/;{3}/openssl-1.0.2o/apps/openssl genrsa 2048| {3}/openssl-1.0.2o/apps/openssl asn1parse|/{4}/parseoutput.pl; /usr/bin/perl /{4}/makeissuer.pl '{0}' '{1}' '{2}'".format(alias, symbol, globalId, baseDir, workshopInstance)
                f = os.popen(cmd)
                while True:
                    en = f.readline()
                    if en == '':
                        break
                cur.execute("""
                update symbol_redo set progress= 0, setup=now() where symbol=%s
                """, [symbol])
                conn.commit()
                outts = datetime.now()
                outload = os.getloadavg()[0]
                # insert the runtime info into c*
                cluster = Cluster(cfg.cassCluster)
                session = cluster.connect(cfg.cassKeyspace)
                stmt = SimpleStatement("""insert into runstat(id,executable,ints,inload,outts,outload)
                values (%s, %s, %s, %s, %s, %s)""", consistency_level=ConsistencyLevel.ANY)
                session.execute(stmt, (uuid.uuid4(), executable, ints, inload, outts, outload))
    except psycopg2.Error as err:
        print("SQLError {0}".format(err))
    finally:
        zk.stop()
        zk.close()
        cur.close()
        conn.close()
Example #11
 def active(self):
     t = self._threshold
     if t == "n":
         t = multiprocessing.cpu_count()
     if os.getloadavg()[0] > float(t):
         return True
     return False
Example #12
    def get_loadavg(self):

        (d1, d2, d3) = os.getloadavg()
        self.loadavg['1'] = d1
        self.loadavg['5'] = d2
        self.loadavg['15'] = d3  # third value from os.getloadavg() is the 15-minute average
        return self.loadavg
Example #13
def system_load(format='{avg:.1f}', threshold_good=1, threshold_bad=2):
	'''Return normalized system load average.

	Highlights using ``system_load_good``, ``system_load_bad`` and
	``system_load_ugly`` highlighting groups, depending on the thresholds
	passed to the function.

	:param str format:
		format string, receives ``avg`` as an argument
	:param float threshold_good:
		threshold for "good load" highlighting
	:param float threshold_bad:
		threshold for "bad load" highlighting
	'''
	cpu_num = cpu_count()
	ret = []
	for avg in os.getloadavg():
		normalized = avg / cpu_num
		if normalized < threshold_good:
			hl = 'system_load_good'
		elif normalized < threshold_bad:
			hl = 'system_load_bad'
		else:
			hl = 'system_load_ugly'
		ret.append({
			'contents': format.format(avg=avg),
			'highlight_group': [hl, 'system_load'],
			'draw_divider': False,
			'divider_highlight_group': 'background:divider',
			})
	ret[0]['draw_divider'] = True
	ret[0]['contents'] += ' '
	ret[1]['contents'] += ' '
	return ret
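
A small sketch re-stating the normalization used by system_load() above with hypothetical numbers (4 cores, the default thresholds of 1 and 2); only the highlight-group choice is reproduced here:

def classify(avg, cpu_num=4, threshold_good=1, threshold_bad=2):
    # Same branching as system_load() above, reduced to the highlight name.
    normalized = avg / cpu_num
    if normalized < threshold_good:
        return 'system_load_good'
    elif normalized < threshold_bad:
        return 'system_load_bad'
    return 'system_load_ugly'

assert classify(2.0) == 'system_load_good'   # 2.0 / 4 = 0.5  < 1
assert classify(6.0) == 'system_load_bad'    # 6.0 / 4 = 1.5  < 2
assert classify(9.0) == 'system_load_ugly'   # 9.0 / 4 = 2.25 >= 2
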
Example #14
File: web.py Project: 5n1p/psdash
def index():
    load_avg = os.getloadavg()
    uptime = datetime.now() - datetime.fromtimestamp(psutil.get_boot_time())
    disks = get_disks()
    users = get_users()

    netifs = get_network_interfaces()
    netifs.sort(key=lambda x: x.get("bytes_sent"), reverse=True)

    data = {
        "os": platform.platform().decode("utf-8"),
        "hostname": socket.gethostname().decode("utf-8"),
        "uptime": str(uptime).split(".")[0],
        "load_avg": load_avg,
        "cpus": psutil.NUM_CPUS,
        "vmem": psutil.virtual_memory(),
        "swap": psutil.swap_memory(),
        "disks": disks,
        "cpu_percent": psutil.cpu_times_percent(0),
        "users": users,
        "net_interfaces": netifs,
        "page": "overview",
        "is_xhr": request.is_xhr
    }

    return render_template("index.html", **data)
Example #15
    def _health_loop(self):
        topic='{0}/linux_state'.format(self.id)
        
        msg=linux_state()
        msg.header=header()
        msg.header.seq=0
        
        while True:
            msg.header.seq+=1
            msg.header.time=time.time()
            
            msg.temp=get_temp()
            
            cpu_perc=[0,0,0,0]
            cpu_percent=psutil.cpu_percent(percpu=True)
            for i in range(len(cpu_percent)):
                cpu_perc[i] = cpu_percent[i]
                
                
            msg.cpu_use=cpu_perc
            msg.load_average=os.getloadavg()
            
            msg.uptime=time.time()-psutil.boot_time()
            msg.memory_use=psutil.virtual_memory().used/(1024*1024)

            try:
                self.lcm.publish(topic, msg.encode())
            except IOError, e:
                print e
            time.sleep(.5)
Example #16
 def _get_cpu_load_avg(self):
     load_avg = os.getloadavg()
     cpu_load_avg = CpuLoadAvg()
     cpu_load_avg.one_min_avg = load_avg[0]
     cpu_load_avg.five_min_avg = load_avg[1]
     cpu_load_avg.fifteen_min_avg = load_avg[2]
     return cpu_load_avg
Example #17
    def preProcess(self, _edObject=None):
        EDPluginControl.preProcess(self)
        self.DEBUG("EDPluginControlXia2DIALSv1_0.preProcess")
        self.screen("Xia2DIALS processing started")

        if self.dataInput.doAnomAndNonanom is not None:
            if self.dataInput.doAnomAndNonanom.value:
                self.doAnomAndNonanom = True
            else:
                self.doAnomAndNonanom = False

        self.strHost = socket.gethostname()
        self.screen("Running on {0}".format(self.strHost))
        try:
            strLoad = os.getloadavg()
            self.screen("System load avg: {0}".format(strLoad))
        except OSError:
            pass

        self.edPluginWaitFileFirst = self.loadPlugin("EDPluginMXWaitFilev1_1", "MXWaitFileFirst")
        self.edPluginWaitFileLast = self.loadPlugin("EDPluginMXWaitFilev1_1", "MXWaitFileLast")

        self.edPluginRetrieveDataCollection = self.loadPlugin("EDPluginISPyBRetrieveDataCollectionv1_4")
        self.edPluginExecXia2DIALSAnom = self.loadPlugin("EDPluginExecXia2DIALSv1_0", "EDPluginExecXia2DIALSv1_0_anom")
        if self.doAnomAndNonanom:
            self.edPluginExecXia2DIALSNoanom = self.loadPlugin("EDPluginExecXia2DIALSv1_0", "EDPluginExecXia2DIALSv1_0_noanom")
Example #18
def system_info():
    viewer_log_file = '/tmp/sync_viewer.log'
    if path.exists(viewer_log_file):
        viewlog = check_output(['tail', '-n', '20', viewer_log_file]).split('\n')
    else:
        viewlog = ["(no viewer log present -- is only the sync server running?)\n"]

    # Get load average from last 15 minutes and round to two digits.
    loadavg = round(getloadavg()[2], 2)

    try:
        run_tvservice = check_output(['tvservice', '-s'])
        display_info = re_split('\||,', run_tvservice.strip('state:'))
    except:
        display_info = False

    # Calculate disk space
    slash = statvfs("/")
    free_space = size(slash.f_bavail * slash.f_frsize)

    # Get uptime
    uptime_in_seconds = uptime()
    system_uptime = timedelta(seconds=uptime_in_seconds)

    return template('system_info', viewlog=viewlog, loadavg=loadavg, free_space=free_space, uptime=system_uptime, display_info=display_info)
Example #19
    def extractData(self):
        st = os.statvfs(self.path)
        load_status = [0, 1, 2]
        totalspace = st.f_blocks*st.f_frsize
        usedspace = (st.f_blocks-st.f_bavail)*st.f_frsize
        freespace = st.f_bavail*st.f_frsize
        freespace_percentage = 100.0*float(freespace)/float(totalspace)
        load = os.getloadavg()
        data = {
        }
        data.update({
            "space_total":totalspace,
            "space_used":usedspace,
            "space_free":freespace,
            "avg_load_last_1min":load[0],
            "avg_load_last_5min":load[1],
            "avg_load_last_15min":load[2]
        })
        if freespace_percentage < self.sp_limit2:
            sp_status = 0.0
        elif freespace_percentage < self.sp_limit1:
            sp_status = 0.5
        else:
            sp_status = 1.0

        for i in range(3):
            if load[i] > self.load_limit_crit[i]:
                load_status[i] = 0.0
            elif load[i] > self.load_limit_warn[i]:
                load_status[i] = 0.5
            else:
                load_status[i] = 1.0
        data["status"] = min(load_status[0],load_status[1],load_status[2],sp_status)

        return data
Example #20
def getinfo():
	global data
	data = "Information from KACANTOUTSB vscout\n"
	data += str(datetime.now()) + "\n"
	#for disk in subprocess.check_output(['df','-h']).split('\n'):
	#    data += disk
	data += str(os.getloadavg()) + "\n"
Example #21
    def func(self):
        "Show times."

        table = [["Current server uptime:",
                  "Total server running time:",
                  "Total in-game time (realtime x %g):" % (gametime.TIMEFACTOR),
                  "Server time stamp:"
                  ],
                 [utils.time_format(time.time() - SESSIONS.server.start_time, 3),
                  utils.time_format(gametime.runtime(format=False), 2),
                  utils.time_format(gametime.gametime(format=False), 2),
                  datetime.datetime.now()
                  ]]
        if utils.host_os_is('posix'):
            loadavg = os.getloadavg()
            table[0].append("Server load (per minute):")
            table[1].append("%g" % (loadavg[0]))
        stable = []
        for col in table:
            stable.append([str(val).strip() for val in col])
        ftable = utils.format_table(stable, 5)
        string = ""
        for row in ftable:
            string += "\n " + "{w%s{n" % row[0] + "".join(row[1:])
        self.caller.msg(string)
Example #22
def sample_loadavg():
    _1, _5, _15 = os.getloadavg()
    return {
        'loadavg_1m': _1,
        'loadavg_5m': _5,
        'loadavg_15m': _15,
    }
Example #23
    def _log_stats(self, request, options):

        # def args_to_unicode(args):
        #     unicode_args = {}
        #     for key, val in args.items():
        #         key = to_unicode(key)
        #         if isinstance(val, list):
        #             val = [args_to_unicode(item)item.decode('utf-8') for item in val]
        #         else:
        #             val = val.decode('utf-8')
        #         unicode_args[key] = val
        #         return unicode_args

        msg = {
            # Anything we retrieve from Twisted request object contains bytes.
            # We have to convert it to unicode first for json.dump to succeed.
            "path": request.path.decode('utf-8'),
            "rendertime": time.time() - request.starttime,
            "maxrss": resource.getrusage(resource.RUSAGE_SELF).ru_maxrss,
            "load": os.getloadavg(),
            "fds": get_num_fds(),
            "active": len(self.pool.active),
            "qsize": len(self.pool.queue.pending),
            "_id": id(request),
            "method": request.method.decode('ascii'),
            "timestamp": int(time.time()),
            "user-agent": (request.getHeader(b"user-agent").decode('utf-8')
                           if request.getHeader(b"user-agent") else None),
            "args": repr(options)
        }
        log.msg(json.dumps(msg), system="events")
Example #24
def _system_info(request=None):
    # OS, hostname, release
    __, hostname, __ = os.uname()[0:3]
    platform = subprocess.check_output(["sysctl", "-n", "hw.model"])
    physmem = str(int(int(subprocess.check_output(["sysctl", "-n", "hw.physmem"])) / 1048576)) + "MB"
    # All this for a timezone, because time.asctime() doesn't add it in.
    date = time.strftime("%a %b %d %H:%M:%S %Z %Y") + "\n"
    uptime = subprocess.check_output("env -u TZ uptime | " "awk -F', load averages:' '{ print $1 }'", shell=True)
    loadavg = "%.2f, %.2f, %.2f" % os.getloadavg()

    freenas_build = "Unrecognized build (%s        missing?)" % VERSION_FILE
    try:
        with open(VERSION_FILE) as d:
            freenas_build = d.read()
    except:
        pass

    if request:
        host = request.META.get("HTTP_HOST")
    else:
        host = None

    return {
        "hostname": hostname,
        "platform": platform,
        "physmem": physmem,
        "date": date,
        "uptime": uptime,
        "loadavg": loadavg,
        "freenas_build": freenas_build,
        "host": host,
    }
Example #25
def _getSummary():
  """
  :returns: a _CsvRow object
  """
  timestamp = time.time()
  loadAvg1, loadAvg5, loadAvg15 = os.getloadavg()
  cpuTimesPct = psutil.cpu_times_percent(interval=0)
  virtualMem = psutil.virtual_memory()
  swapMem = psutil.swap_memory()

  row = _CsvRow(
    timestamp=timestamp,
    loadAvg1=loadAvg1,
    loadAvg5=loadAvg5,
    loadAvg15=loadAvg15,
    cpuUserPct=cpuTimesPct.user,
    cpuSystemPct=cpuTimesPct.system,
    cpuNicePct=cpuTimesPct.nice,
    cpuIdlePct=cpuTimesPct.idle,
    memTotalB=virtualMem.total,
    memUsageB=virtualMem.total - virtualMem.available,
    memAvailB=virtualMem.available,
    memUsagePct=virtualMem.percent,
    memBuffersB=virtualMem.buffers if hasattr(virtualMem, "buffers") else None,
    memCachedB=virtualMem.cached if hasattr(virtualMem, "cached") else None,
    swapTotalB=swapMem.total,
    swapUsedB=swapMem.used,
    swapFreeB=swapMem.free,
    swapUsedPct=swapMem.percent,
    swapInsB=swapMem.sin,
    swapOutsB=swapMem.sout
  )

  return row
Example #26
def _system_info(request=None):
    # OS, hostname, release
    __, hostname, __ = os.uname()[0:3]
    platform = subprocess.check_output(['sysctl', '-n', 'hw.model'])
    physmem = str(int(int(
        subprocess.check_output(['sysctl', '-n', 'hw.physmem'])
    ) / 1048576)) + 'MB'
    # All this for a timezone, because time.asctime() doesn't add it in.
    date = time.strftime('%a %b %d %H:%M:%S %Z %Y') + '\n'
    uptime = subprocess.check_output(
        "env -u TZ uptime | awk -F', load averages:' '{ print $1 }'",
        shell=True
    )
    loadavg = "%.2f, %.2f, %.2f" % os.getloadavg()

    freenas_build = "Unrecognized build (%s        missing?)" % VERSION_FILE
    try:
        with open(VERSION_FILE) as d:
            freenas_build = d.read()
    except:
        pass

    return {
        'hostname': hostname,
        'platform': platform,
        'physmem': physmem,
        'date': date,
        'uptime': uptime,
        'loadavg': loadavg,
        'freenas_build': freenas_build,
    }
Example #27
def run(sock, delay):
    """Make the client go go go"""
    while True:
        # Epoch, timestamp in seconds since 1970
        now = int(time.time())

        # Initialize the protobuf payload
        payload_pb = Payload()

        labels = ['1min', '5min', '15min']
        for name, value in zip(labels, os.getloadavg()):
            m = payload_pb.metrics.add()
            m.metric = 'system.loadavg_' + name
            p = m.points.add()
            p.timestamp = now
            p.value = value

        print("sending message")
        print(('-' * 80))
        print(payload_pb)

        package = payload_pb.SerializeToString()

        # The message must be prepended with its size
        size = struct.pack('!L', len(package))
        sock.sendall(size)

        # Then send the actual payload
        sock.sendall(package)

        time.sleep(delay)
Example #28
	def heart_beat(self):
		while(1):
			my_load=os.getloadavg()
			my_load=my_load[0]
			command="load#"+str(my_load)
			self.socket.sendto(command, (self.server_ip, self.tx_port))
			sleep(0.25)
Example #29
    def cpu_stats(self):
        cfg_process = psutil.Process(os.getpid())
        while True:
            # collect Vmsizes
            self._ip_change = 0
            self._build_change = 0
            rss = cfg_process.get_memory_info().rss
            if (self._rss != rss):
                self._rss = rss

            vms = cfg_process.get_memory_info().vms
            if (self._vms != vms):
                self._vms = vms

            pvms = vms
            if (pvms > self._pvms):
                self._pvms = pvms

            if self._sysinfo:
                # collect CPU Load avg
                load_avg = os.getloadavg()
                if (load_avg != self._load_avg):
                    self._load_avg = load_avg

                # collect systemmeory info
                phymem_usage = psutil.phymem_usage()
                if (phymem_usage != self._phymem_usage):
                    self._phymem_usage = phymem_usage

                phymem_buffers = psutil.phymem_buffers()
                if (phymem_buffers != self._phymem_buffers):
                    self._phymem_buffers = phymem_buffers

                if (self._new_ip != self._curr_ip):
                    self._new_ip = self.get_config_node_ip()
                    self._ip_change = 1

                # Retrieve build_info from package/rpm and cache it
                if self._curr_build_info is None:
                    command = "contrail-version contrail-config | grep 'contrail-config'"
                    version = os.popen(command).read()
                    _, rpm_version, build_num = version.split()
                    self._new_build_info = build_info + '"build-id" : "' + \
                        rpm_version + '", "build-number" : "' + \
                        build_num + '"}]}'
                if (self._new_build_info != self._curr_build_info):
                    self._curr_build_info = self._new_build_info
                    self._build_change = 1

            num_cpus = psutil.NUM_CPUS
            if (num_cpus != self._num_cpus):
                self._num_cpus = num_cpus

            cpu_percent = cfg_process.get_cpu_percent(interval=0.1)
            cpu_share = cpu_percent / num_cpus
            self._cpu_share = cpu_share

            self._send_cpustats()

            gevent.sleep(self._time_interval)
Example #30
def current_status():

    load = "%.2f %.2f %.2f" % os.getloadavg()

    process = subprocess.Popen(
        [
            config.KAFKA_RUN_CLASS_BINARY,
            "kafka.tools.ConsumerOffsetChecker",
            "--topic",
            "listens",
            "--group",
            "listen-group",
        ],
        stdout=subprocess.PIPE,
    )
    out, err = process.communicate()

    print out

    lines = out.split("\n")
    data = []
    for line in lines:
        if line.startswith("listen-group"):
            data = line.split()

    if len(data) >= 6:
        kafka_stats = {
            "offset": locale.format("%d", int(data[3]), grouping=True),
            "size": locale.format("%d", int(data[4]), grouping=True),
            "lag": locale.format("%d", int(data[5]), grouping=True),
        }
    else:
        kafka_stats = {"offset": "(unknown/empty)", "size": "-", "lag": "-"}

    return render_template("index/current-status.html", load=load, kstats=kafka_stats)
Example #31
#!/usr/bin/python3
import os, sys

os.system("clear")
os.system("ls")

print(os.name)

print(sys.platform)

print(os.getpid())

print(os.listdir())

print(os.getcwd())

print(os.getloadavg())

print(os.cpu_count())
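
The script above assumes a Unix-like platform; on Windows os.getloadavg() is not available. A hedged sketch of a portable variant, assuming psutil is installed (psutil.getloadavg() emulates the value on Windows):

import os

try:
    load1, load5, load15 = os.getloadavg()      # Unix-only
except (AttributeError, OSError):
    import psutil                               # assumption: psutil >= 5.6.2 is available
    load1, load5, load15 = psutil.getloadavg()  # emulated on Windows

print("load averages (1/5/15 min):", load1, load5, load15)
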
        args.emails, '[%s:%s] test-zstd-speed.py %s has been started' %
        (email_header, pid, script_version), args.message, have_mutt,
        have_mail)
    with open(pidfile, 'w') as the_file:
        the_file.write(pid)

    branch = ""
    commit = ""
    first_time = True
    while True:
        try:
            if first_time:
                first_time = False
            else:
                time.sleep(args.sleepTime)
            loadavg = os.getloadavg()[0]
            if (loadavg <= args.maxLoadAvg):
                branches = git_get_branches()
                for branch in branches:
                    commit = execute('git show -s --format=%h ' + branch,
                                     verbose)[0]
                    last_commit = update_config_file(branch, commit)
                    if commit == last_commit:
                        log("skipping branch %s: head %s already processed" %
                            (branch, commit))
                    else:
                        log("build branch %s: head %s is different from prev %s"
                            % (branch, commit, last_commit))
                        execute('git checkout -- . && git checkout ' + branch)
                        print(git_get_changes(branch, commit, last_commit))
                        test_commit(branch, commit, last_commit, args,
def benchmark_and_compare(branch, commit, last_commit, args, executableName,
                          md5sum, compilerVersion, resultsFileName,
                          testFilePath, fileName, last_csize, last_cspeed,
                          last_dspeed):
    sleepTime = 30
    while os.getloadavg()[0] > args.maxLoadAvg:
        log("WARNING: bench loadavg=%.2f is higher than %s, sleeping for %s seconds"
            % (os.getloadavg()[0], args.maxLoadAvg, sleepTime))
        time.sleep(sleepTime)
    start_load = str(os.getloadavg())
    osType = platform.system()
    if osType == 'Linux':
        cpuSelector = "taskset --cpu-list 0"
    else:
        cpuSelector = ""
    if args.dictionary:
        result = execute('%s programs/%s -rqi5b1e%s -D %s %s' %
                         (cpuSelector, executableName, args.lastCLevel,
                          args.dictionary, testFilePath),
                         print_output=True)
    else:
        result = execute(
            '%s programs/%s -rqi5b1e%s %s' %
            (cpuSelector, executableName, args.lastCLevel, testFilePath),
            print_output=True)
    end_load = str(os.getloadavg())
    linesExpected = args.lastCLevel + 1
    if len(result) != linesExpected:
        raise RuntimeError(
            "ERROR: number of result lines=%d is different that expected %d\n%s"
            % (len(result), linesExpected, '\n'.join(result)))
    with open(resultsFileName, "a") as myfile:
        myfile.write('%s %s %s md5=%s\n' %
                     (branch, commit, compilerVersion, md5sum))
        myfile.write('\n'.join(result) + '\n')
        myfile.close()
        if (last_cspeed == None):
            log("WARNING: No data for comparison for branch=%s file=%s " %
                (branch, fileName))
            return ""
        commit, csize, cspeed, dspeed = get_last_results(resultsFileName)
        text = ""
        for i in range(0, min(len(cspeed), len(last_cspeed))):
            print(
                "%s:%s -%d cSpeed=%6.2f cLast=%6.2f cDiff=%1.4f dSpeed=%6.2f dLast=%6.2f dDiff=%1.4f ratioDiff=%1.4f %s"
                %
                (branch, commit, i + 1, cspeed[i], last_cspeed[i], cspeed[i] /
                 last_cspeed[i], dspeed[i], last_dspeed[i], dspeed[i] /
                 last_dspeed[i], float(last_csize[i]) / csize[i], fileName))
            if (cspeed[i] / last_cspeed[i] < args.lowerLimit):
                text += "WARNING: %s -%d cSpeed=%.2f cLast=%.2f cDiff=%.4f %s\n" % (
                    executableName, i + 1, cspeed[i], last_cspeed[i],
                    cspeed[i] / last_cspeed[i], fileName)
            if (dspeed[i] / last_dspeed[i] < args.lowerLimit):
                text += "WARNING: %s -%d dSpeed=%.2f dLast=%.2f dDiff=%.4f %s\n" % (
                    executableName, i + 1, dspeed[i], last_dspeed[i],
                    dspeed[i] / last_dspeed[i], fileName)
            if (float(last_csize[i]) / csize[i] < args.ratioLimit):
                text += "WARNING: %s -%d cSize=%d last_cSize=%d diff=%.4f %s\n" % (
                    executableName, i + 1, csize[i], last_csize[i],
                    float(last_csize[i]) / csize[i], fileName)
        if text:
            text = args.message + (
                "\nmaxLoadAvg=%s  load average at start=%s end=%s\n%s  last_commit=%s  md5=%s\n"
                % (args.maxLoadAvg, start_load, end_load, compilerVersion,
                   last_commit, md5sum)) + text
        return text
Example #34
 def load_fair():
     try:
         load = os.getloadavg()[0] / _cpu_count
     except OSError:  # as of May 2016, Windows' Linux subsystem throws OSError on getloadavg
         load = -1
     return 'load', load
Example #35
 def get_load(self):
     return os.getloadavg()  # 1, 5, 15 min
Example #36
def getCPUload():

    ldavg = os.getloadavg()[1]
    nbrcpu = multiprocessing.cpu_count()
    return float((ldavg / nbrcpu) * 100)
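
A quick check of the normalization above with hypothetical numbers: a 5-minute load average of 2.0 on a 4-core machine comes out as 50.0, i.e. the run queue averages half the available cores.

# Hypothetical values, same formula as getCPUload() above:
ldavg, nbrcpu = 2.0, 4
assert float((ldavg / nbrcpu) * 100) == 50.0
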
Example #37
 def _watch_load(self) -> None:
     """Pause consuming messages if lood goes above the allowed limit."""
     while not self.shutdown_pending.wait(1):
         self._current_load = os.getloadavg()[0]
Example #38
 def update(self):
     """Get the latest system information."""
     if self.type == "disk_use_percent":
         self._state = psutil.disk_usage(self.argument).percent
     elif self.type == "disk_use":
         self._state = round(psutil.disk_usage(self.argument).used / 1024 ** 3, 1)
     elif self.type == "disk_free":
         self._state = round(psutil.disk_usage(self.argument).free / 1024 ** 3, 1)
     elif self.type == "memory_use_percent":
         self._state = psutil.virtual_memory().percent
     elif self.type == "memory_use":
         virtual_memory = psutil.virtual_memory()
         self._state = round(
             (virtual_memory.total - virtual_memory.available) / 1024 ** 2, 1
         )
     elif self.type == "memory_free":
         self._state = round(psutil.virtual_memory().available / 1024 ** 2, 1)
     elif self.type == "swap_use_percent":
         self._state = psutil.swap_memory().percent
     elif self.type == "swap_use":
         self._state = round(psutil.swap_memory().used / 1024 ** 2, 1)
     elif self.type == "swap_free":
         self._state = round(psutil.swap_memory().free / 1024 ** 2, 1)
     elif self.type == "processor_use":
         self._state = round(psutil.cpu_percent(interval=None))
     elif self.type == "processor_temperature":
         self._state = self.read_cpu_temperature()
     elif self.type == "process":
         for proc in psutil.process_iter():
             try:
                 if self.argument == proc.name():
                     self._state = STATE_ON
                     return
             except psutil.NoSuchProcess as err:
                 _LOGGER.warning(
                     "Failed to load process with id: %s, old name: %s",
                     err.pid,
                     err.name,
                 )
         self._state = STATE_OFF
     elif self.type == "network_out" or self.type == "network_in":
         counters = psutil.net_io_counters(pernic=True)
         if self.argument in counters:
             counter = counters[self.argument][IO_COUNTER[self.type]]
             self._state = round(counter / 1024 ** 2, 1)
         else:
             self._state = None
     elif self.type == "packets_out" or self.type == "packets_in":
         counters = psutil.net_io_counters(pernic=True)
         if self.argument in counters:
             self._state = counters[self.argument][IO_COUNTER[self.type]]
         else:
             self._state = None
     elif (
         self.type == "throughput_network_out"
         or self.type == "throughput_network_in"
     ):
         counters = psutil.net_io_counters(pernic=True)
         if self.argument in counters:
             counter = counters[self.argument][IO_COUNTER[self.type]]
             now = dt_util.utcnow()
             if self._last_value and self._last_value < counter:
                 self._state = round(
                     (counter - self._last_value)
                     / 1000 ** 2
                     / (now - self._last_update_time).seconds,
                     3,
                 )
             else:
                 self._state = None
             self._last_update_time = now
             self._last_value = counter
         else:
             self._state = None
     elif self.type == "ipv4_address" or self.type == "ipv6_address":
         addresses = psutil.net_if_addrs()
         if self.argument in addresses:
             for addr in addresses[self.argument]:
                 if addr.family == IF_ADDRS_FAMILY[self.type]:
                     self._state = addr.address
         else:
             self._state = None
     elif self.type == "last_boot":
         self._state = dt_util.as_local(
             dt_util.utc_from_timestamp(psutil.boot_time())
         ).isoformat()
     elif self.type == "load_1m":
         self._state = round(os.getloadavg()[0], 2)
     elif self.type == "load_5m":
         self._state = round(os.getloadavg()[1], 2)
     elif self.type == "load_15m":
         self._state = round(os.getloadavg()[2], 2)
Example #39
 def update(self):
     """Get the latest system information."""
     import psutil
     if self.type == 'disk_use_percent':
         self._state = psutil.disk_usage(self.argument).percent
     elif self.type == 'disk_use':
         self._state = round(psutil.disk_usage(self.argument).used /
                             1024**3, 1)
     elif self.type == 'disk_free':
         self._state = round(psutil.disk_usage(self.argument).free /
                             1024**3, 1)
     elif self.type == 'memory_use_percent':
         self._state = psutil.virtual_memory().percent
     elif self.type == 'memory_use':
         virtual_memory = psutil.virtual_memory()
         self._state = round((virtual_memory.total -
                              virtual_memory.available) /
                             1024**2, 1)
     elif self.type == 'memory_free':
         self._state = round(psutil.virtual_memory().available / 1024**2, 1)
     elif self.type == 'swap_use_percent':
         self._state = psutil.swap_memory().percent
     elif self.type == 'swap_use':
         self._state = round(psutil.swap_memory().used / 1024**3, 1)
     elif self.type == 'swap_free':
         self._state = round(psutil.swap_memory().free / 1024**3, 1)
     elif self.type == 'processor_use':
         self._state = round(psutil.cpu_percent(interval=None))
     elif self.type == 'process':
         for proc in psutil.process_iter():
             try:
                 if self.argument == proc.name():
                     self._state = STATE_ON
                     return
             except psutil.NoSuchProcess as err:
                 _LOGGER.warning(
                     "Failed to load process with id: %s, old name: %s",
                     err.pid, err.name)
         self._state = STATE_OFF
     elif self.type == 'network_out' or self.type == 'network_in':
         counters = psutil.net_io_counters(pernic=True)
         if self.argument in counters:
             counter = counters[self.argument][IO_COUNTER[self.type]]
             self._state = round(counter / 1024**2, 1)
         else:
             self._state = STATE_UNKNOWN
     elif self.type == 'packets_out' or self.type == 'packets_in':
         counters = psutil.net_io_counters(pernic=True)
         if self.argument in counters:
             self._state = counters[self.argument][IO_COUNTER[self.type]]
         else:
             self._state = STATE_UNKNOWN
     elif self.type == 'ipv4_address' or self.type == 'ipv6_address':
         addresses = psutil.net_if_addrs()
         if self.argument in addresses:
             self._state = addresses[self.argument][IF_ADDRS[self.type]][1]
         else:
             self._state = STATE_UNKNOWN
     elif self.type == 'last_boot':
         self._state = dt_util.as_local(
             dt_util.utc_from_timestamp(psutil.boot_time())
         ).date().isoformat()
     elif self.type == 'since_last_boot':
         self._state = dt_util.utcnow() - dt_util.utc_from_timestamp(
             psutil.boot_time())
     elif self.type == 'load_1m':
         self._state = os.getloadavg()[0]
     elif self.type == 'load_5m':
         self._state = os.getloadavg()[1]
     elif self.type == 'load_15m':
         self._state = os.getloadavg()[2]
Example #40
def mon_performance():

    # get CPU utilization as a percentage
    cpuload = psutil.cpu_percent(interval=1)

    # get system/CPU load
    load1m, _, _ = os.getloadavg()

    print("\nTotal CPU usage:", cpuload, "%")
    print("Total system load:", load1m)
    print("Average temp. of all cores:", avg_all_core_temp, "°C")

    # get system/CPU load
    load1m, _, _ = os.getloadavg()

    if psutil.cpu_percent(percpu=False, interval=0.01) >= 20.0 or max(psutil.cpu_percent(percpu=True, interval=0.01)) >= 75:
        print("\nHigh CPU load")

        # high cpu usage trigger
        if cpuload >= 20:
            print("suggesting to set turbo boost: on")
            get_turbo()

        # set turbo state based on average of all core temperatures
        elif cpuload <= 25 and avg_all_core_temp >= 70:
            print("Optimal total CPU usage:", cpuload, "%, high average core temp:", avg_all_core_temp, "°C")
            print("suggesting to set turbo boost: off")
            get_turbo()
        else:
            print("suggesting to set turbo boost: on")
            get_turbo()

    elif load1m > performance_load_threshold:
        print("\nHigh system load")

        # high cpu usage trigger
        if cpuload >= 20:
            print("suggesting to set turbo boost: on")
            get_turbo()

        # set turbo state based on average of all core temperatures
        elif cpuload <= 25 and avg_all_core_temp >= 65:
            print("Optimal total CPU usage:", cpuload, "%, high average core temp:", avg_all_core_temp, "°C")
            print("suggesting to set turbo boost: off")
            get_turbo()
        else:
            print("suggesting to set turbo boost: on")
            get_turbo()

    else:
        print("\nLoad optimal")

        # high cpu usage trigger
        if cpuload >= 20:
            print("suggesting to set turbo boost: on")
            get_turbo()

        # set turbo state based on average of all core temperatures
        elif cpuload <= 25 and avg_all_core_temp >= 60:
            print("Optimal total CPU usage:", cpuload, "%, high average core temp:", avg_all_core_temp, "°C")
            print("suggesting to set turbo boost: off")
            get_turbo()
        else:
            print("suggesting to set turbo boost: on")
            get_turbo()

    footer()
Example #41
    def get(self) -> Response:

        # This is the average system load calculated over a given period of time
        # of 1, 5 and 15 minutes.
        # In our case, we will show the load average over a period of 15 minutes.
        # The numbers returned by os.getloadavg() only make sense if
        # related to the number of CPU cores installed on the system.

        # Here we are converting the load average into percentage.
        # The higher the percentage the higher the load
        load_percentage = (100 * os.getloadavg()[-1]) / (os.cpu_count() or 1)

        vmstat = local["vmstat"]

        vmstat_out1 = vmstat().split("\n")
        vmstat_out1 = re.split(r"\s+", vmstat_out1[2])

        # convert list in dict
        vmstat_out1 = {k: v for k, v in enumerate(vmstat_out1)}

        # summarize disk statistics
        # vmstat_out2 = vmstat(["-D"]).split('\n')
        # Example:
        #       22 disks
        #        0 partitions
        #   273820 total reads
        #    63034 merged reads
        # 27787446 read sectors
        #  2395193 milli reading
        #   116450 writes
        #   438666 merged writes
        #  4467248 written sectors
        # 15377932 milli writing
        #        0 inprogress IO
        #     1412 milli spent IO

        # event counter statistics
        vmstat_out2 = vmstat(["-s", "-S", "M"]).split("\n")

        boot_time = datetime.fromtimestamp(
            int(vmstat_out2[24].strip().split(" ")[0]))

        # Disk usage
        # Get total disk size, used disk space, and free disk
        total, used, free = shutil.disk_usage("/")

        # Network latency
        # Here we will ping google at an interval of five seconds for five times
        # min response time, average response time, and the max response time.
        # ping = local["ping"]
        # ping_result = ping(["-c", "5", "google.com"]).split("\n")

        # ping_result = ping_result[-2].split("=")[-1].split("/")[:3]

        statistics: StatsType = {
            "system": {
                "boot_time": boot_time
            },
            "cpu": {
                # Get Physical and Logical CPU Count
                "count": os.cpu_count() or 0,
                "load_percentage": load_percentage,
                # System
                #     in: The number of interrupts per second, including the clock.
                #     cs: The number of context switches per second.
                # in = vm.get(11, 0)
                # cs = vm.get(12, 0)
                # CPU
                #     These are percentages of total CPU time.
                #     us: Time spent running non-kernel code. (user time and nice time)
                #     sy: Time spent running kernel code. (system time)
                #     id: Time spent idle.
                #     wa: Time spent waiting for IO.
                #     st: Time stolen from a virtual machine.
                "user": vmstat_out1.get(13, 0),
                "system": vmstat_out1.get(14, 0),
                "idle": vmstat_out1.get(15, 0),
                "wait": vmstat_out1.get(16, 0),
                "stolen": vmstat_out1.get(17, 0),
            },
            "ram": {
                "total": vmstat_out2[0].strip().split(" ")[0],
                "used": vmstat_out2[1].strip().split(" ")[0],
                "active": vmstat_out2[2].strip().split(" ")[0],
                "inactive": vmstat_out2[3].strip().split(" ")[0],
                "free": vmstat_out2[4].strip().split(" ")[0],
                "buffer": vmstat_out2[5].strip().split(" ")[0],
                "cache": vmstat_out2[6].strip().split(" ")[0],
            },
            "swap": {
                # Swap
                #     si: Amount of memory swapped in from disk (/s).
                #     so: Amount of memory swapped to disk (/s).
                "from_disk": vmstat_out1.get(7, 0),
                "to_disk": vmstat_out1.get(8, 0),
                "total": vmstat_out2[7].strip().split(" ")[0],
                "used": vmstat_out2[8].strip().split(" ")[0],
                "free": vmstat_out2[9].strip().split(" ")[0],
            },
            "disk": {
                "total_disk_space": total / 1024**3,
                "used_disk_space": used / 1024**3,
                "free_disk_space": free / 1024**3,
                "occupacy": 100 * used / total,
            },
            "procs": {
                # Procs
                # r: The number of processes waiting for run time.
                # b: The number of processes in uninterruptible sleep.
                "waiting_for_run": vmstat_out1.get(1, 0),
                "uninterruptible_sleep": vmstat_out1.get(2, 0),
            },
            "io": {
                # IO
                #     bi: Blocks received from a block device (blocks/s).
                #     bo: Blocks sent to a block device (blocks/s).
                "blocks_received": vmstat_out1.get(9, 0),
                "blocks_sent": vmstat_out1.get(10, 0),
            },
            "network_latency": {
                # "min": ping_result[0].strip(),
                # "avg": ping_result[1].strip(),
                # "max": ping_result[2].strip(),
                "min": 0,
                "avg": 0,
                "max": 0,
            },
        }

        return self.response(statistics)
Example #42
 def get_load_average(self,):
     return os.getloadavg()[0]
Example #43
def get_load():
	return os.getloadavg()[0]
Example #44
def set_performance():

    print(f"Setting to use: \"{get_avail_performance()}\" governor")
    run(f"cpufreqctl.auto-cpufreq --governor --set={get_avail_performance()}", shell=True)
    if Path("/sys/devices/system/cpu/cpu0/cpufreq/energy_performance_preference").exists() and Path("/sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost").exists() == False:
        run("cpufreqctl.auto-cpufreq --epp --set=balance_performance", shell=True)
        print("Setting to use: \"balance_performance\" EPP")

    # get CPU utilization as a percentage
    cpuload = psutil.cpu_percent(interval=1)

    # get system/CPU load
    load1m, _, _ = os.getloadavg()

    print("\nTotal CPU usage:", cpuload, "%")
    print("Total system load:", load1m)
    print("Average temp. of all cores:", avg_all_core_temp, "°C")

    if psutil.cpu_percent(percpu=False, interval=0.01) >= 20.0 or max(psutil.cpu_percent(percpu=True, interval=0.01)) >= 75:
        print("\nHigh CPU load")

        # high cpu usage trigger
        if cpuload >= 20:
            print("setting turbo boost: on")
            turbo(True)

        # set turbo state based on average of all core temperatures
        elif cpuload <= 25 and avg_all_core_temp >= 70:
            print("Optimal total CPU usage:", cpuload, "%, high average core temp:", avg_all_core_temp, "°C")
            print("setting turbo boost: off")
            turbo(False)
        else:
            print("setting turbo boost: on")
            turbo(True)

    elif load1m >= performance_load_threshold:
        print("\nHigh system load")

        # high cpu usage trigger
        if cpuload >= 20:
            print("setting turbo boost: on")
            turbo(True)

        # set turbo state based on average of all core temperatures
        elif cpuload <= 25 and avg_all_core_temp >= 65:
            print("Optimal total CPU usage:", cpuload, "%, high average core temp:", avg_all_core_temp, "°C")
            print("setting turbo boost: off")
            turbo(False)
        else:
            print("setting turbo boost: on")
            turbo(True)

    else:
        print("\nLoad optimal")

        # high cpu usage trigger
        if cpuload >= 20:
            print("setting turbo boost: on")
            turbo(True)

        # set turbo state based on average of all core temperatures
        elif cpuload <= 25 and avg_all_core_temp >= 60:
            print("Optimal total CPU usage:", cpuload, "%, high average core temp:", avg_all_core_temp, "°C")
            print("setting turbo boost: off")
            turbo(False)
        else:
            print("setting turbo boost: on")
            turbo(True)

    footer()
Example #45
 def test_os_getloadavg(self):
     os = self.posix
     l0, l1, l2 = os.getloadavg()
     assert type(l0) is float and l0 >= 0.0
     assert type(l1) is float and l1 >= 0.0
     assert type(l2) is float and l2 >= 0.0
Example #46
def get_load():
	try:
		r = os.getloadavg()
		return float(r[0])
	except (EnvironmentError, ValueError):
		return float(0.0)
Example #47
def main():
    '''
    Get health and status stats and post to ES
    Post both as a historical reference (for charts)
    and as a static docid (for realtime current health/EPS displays)
    '''
    logger.debug('starting')
    logger.debug(options)
    es = ElasticsearchClient(
        (list('{0}'.format(s) for s in options.esservers)))
    index = options.index

    with open(options.default_mapping_file, 'r') as mapping_file:
        default_mapping_contents = json.loads(mapping_file.read())

    if not es.index_exists(index):
        try:
            logger.debug('Creating %s index' % index)
            es.create_index(index, default_mapping_contents)
        except Exception as e:
            logger.error("Unhandled exception, terminating: %r" % e)

    auth = HTTPBasicAuth(options.mquser, options.mqpassword)

    for server in options.mqservers:
        logger.debug('checking message queues on {0}'.format(server))
        r = requests.get('http://{0}:{1}/api/queues'.format(
            server, options.mqapiport),
                         auth=auth)

        mq = r.json()
        # setup a log entry for health/status.
        healthlog = dict(utctimestamp=toUTC(datetime.now()).isoformat(),
                         hostname=server,
                         processid=os.getpid(),
                         processname=sys.argv[0],
                         severity='INFO',
                         summary='mozdef health/status',
                         category='mozdef',
                         source='mozdef',
                         tags=[],
                         details=[])

        healthlog['details'] = dict(username='******')
        healthlog['details']['loadaverage'] = list(os.getloadavg())
        healthlog['details']['queues'] = list()
        healthlog['details']['total_deliver_eps'] = 0
        healthlog['details']['total_publish_eps'] = 0
        healthlog['details']['total_messages_ready'] = 0
        healthlog['tags'] = ['mozdef', 'status']
        for m in mq:
            if 'message_stats' in m.keys() and isinstance(
                    m['message_stats'], dict):
                if 'messages_ready' in m.keys():
                    mready = m['messages_ready']
                    healthlog['details']['total_messages_ready'] += m[
                        'messages_ready']
                else:
                    mready = 0
                if 'messages_unacknowledged' in m.keys():
                    munack = m['messages_unacknowledged']
                else:
                    munack = 0
                queueinfo = dict(queue=m['name'],
                                 vhost=m['vhost'],
                                 messages_ready=mready,
                                 messages_unacknowledged=munack)

                if 'deliver_details' in m['message_stats'].keys():
                    queueinfo['deliver_eps'] = round(
                        m['message_stats']['deliver_details']['rate'], 2)
                    healthlog['details']['total_deliver_eps'] += round(
                        m['message_stats']['deliver_details']['rate'], 2)
                if 'deliver_no_ack_details' in m['message_stats'].keys():
                    queueinfo['deliver_eps'] = round(
                        m['message_stats']['deliver_no_ack_details']['rate'],
                        2)
                    healthlog['details']['total_deliver_eps'] += round(
                        m['message_stats']['deliver_no_ack_details']['rate'],
                        2)
                if 'publish_details' in m['message_stats'].keys():
                    queueinfo['publish_eps'] = round(
                        m['message_stats']['publish_details']['rate'], 2)
                    healthlog['details']['total_publish_eps'] += round(
                        m['message_stats']['publish_details']['rate'], 2)
                healthlog['details']['queues'].append(queueinfo)

        # post to elastic search servers directly without going through
        # message queues in case there is an availability issue
        es.save_event(index=index,
                      doc_type='mozdefhealth',
                      body=json.dumps(healthlog))
        # post another doc with a static docid and tag
        # for use when querying for the latest status
        healthlog['tags'] = ['mozdef', 'status', 'latest']
        es.save_event(index=index,
                      doc_type='mozdefhealth',
                      doc_id=getDocID(server),
                      body=json.dumps(healthlog))
Example #48
    def func(self):
        "Show list."

        global _IDMAPPER
        if not _IDMAPPER:
            from evennia.utils.idmapper import models as _IDMAPPER

        if "flushmem" in self.switches:
            # flush the cache
            prev, _ = _IDMAPPER.cache_size()
            nflushed = _IDMAPPER.flush_cache()
            now, _ = _IDMAPPER.cache_size()
            string = "The Idmapper cache freed |w{idmapper}|n database objects.\n" \
                     "The Python garbage collector freed |w{gc}|n Python instances total."
            self.caller.msg(string.format(idmapper=(prev - now), gc=nflushed))
            return

        # display active processes

        os_windows = os.name == "nt"
        pid = os.getpid()

        if os_windows:
            # Windows requires the psutil module to even get paltry
            # statistics like this (it's pretty much worthless,
            # unfortunately, since it's not specific to the process) /rant
            try:
                import psutil
                has_psutil = True
            except ImportError:
                has_psutil = False

            if has_psutil:
                loadavg = psutil.cpu_percent()
                _mem = psutil.virtual_memory()
                rmem = _mem.used / (1000.0 * 1000)
                pmem = _mem.percent

                if "mem" in self.switches:
                    string = "Total computer memory usage: {w%g{n MB (%g%%)"
                    self.caller.msg(string % (rmem, pmem))
                    return
                # Display table
                loadtable = EvTable("property", "statistic", align="l")
                loadtable.add_row("Total CPU load", "%g %%" % loadavg)
                loadtable.add_row("Total computer memory usage",
                                  "%g MB (%g%%)" % (rmem, pmem))
                loadtable.add_row("Process ID", "%g" % pid)
            else:
                loadtable = "Not available on Windows without 'psutil' library " \
                            "(install with {wpip install psutil{n)."

        else:
            # Linux / BSD (OSX) - proper pid-based statistics

            global _RESOURCE
            if not _RESOURCE:
                import resource as _RESOURCE

            loadavg = os.getloadavg()[0]
            rmem = float(
                os.popen('ps -p %d -o %s | tail -1' %
                         (pid, "rss")).read()) / 1000.0  # resident memory
            vmem = float(
                os.popen('ps -p %d -o %s | tail -1' %
                         (pid, "vsz")).read()) / 1000.0  # virtual memory
            pmem = float(
                os.popen(
                    'ps -p %d -o %s | tail -1' %
                    (pid,
                     "%mem")).read())  # percent of resident memory to total
            rusage = _RESOURCE.getrusage(_RESOURCE.RUSAGE_SELF)

            if "mem" in self.switches:
                string = "Memory usage: RMEM: {w%g{n MB (%g%%), " \
                         " VMEM (res+swap+cache): {w%g{n MB."
                self.caller.msg(string % (rmem, pmem, vmem))
                return

            loadtable = EvTable("property", "statistic", align="l")
            loadtable.add_row("Server load (1 min)", "%g" % loadavg)
            loadtable.add_row("Process ID", "%g" % pid)
            loadtable.add_row("Memory usage", "%g MB (%g%%)" % (rmem, pmem))
            loadtable.add_row("Virtual address space", "")
            loadtable.add_row("{x(resident+swap+caching){n", "%g MB" % vmem)
            loadtable.add_row(
                "CPU time used (user)", "%s (%gs)" %
                (utils.time_format(rusage.ru_utime), rusage.ru_utime))
            loadtable.add_row(
                "CPU time used (system)", "%s (%gs)" %
                (utils.time_format(rusage.ru_stime), rusage.ru_stime))
            loadtable.add_row(
                "Page faults", "%g hard,  %g soft, %g swapouts" %
                (rusage.ru_majflt, rusage.ru_minflt, rusage.ru_nswap))
            loadtable.add_row(
                "Disk I/O",
                "%g reads, %g writes" % (rusage.ru_inblock, rusage.ru_oublock))
            loadtable.add_row(
                "Network I/O",
                "%g in, %g out" % (rusage.ru_msgrcv, rusage.ru_msgsnd))
            loadtable.add_row(
                "Context switching", "%g vol, %g forced, %g signals" %
                (rusage.ru_nvcsw, rusage.ru_nivcsw, rusage.ru_nsignals))

        # os-generic

        string = "{wServer CPU and Memory load:{n\n%s" % loadtable

        # object cache count (note that sys.getsizeof is not called, so this works for pypy too)
        total_num, cachedict = _IDMAPPER.cache_size()
        sorted_cache = sorted([(key, num)
                               for key, num in cachedict.items() if num > 0],
                              key=lambda tup: tup[1],
                              reverse=True)
        memtable = EvTable("entity name", "number", "idmapper %", align="l")
        for tup in sorted_cache:
            memtable.add_row(tup[0], "%i" % tup[1],
                             "%.2f" % (float(tup[1]) / total_num * 100))

        string += "\n{w Entity idmapper cache:{n %i items\n%s" % (total_num,
                                                                  memtable)

        # return to caller
        self.caller.msg(string)
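The POSIX branch above shells out to ps three times for per-process memory figures. Where psutil is importable anyway (as in the Windows branch), the same numbers are available without subprocesses; a sketch of that alternative (illustrative only, not how Evennia implements it):

import os
import psutil

proc = psutil.Process(os.getpid())
rmem = proc.memory_info().rss / (1000.0 * 1000)  # resident set size, MB
vmem = proc.memory_info().vms / (1000.0 * 1000)  # virtual size, MB
pmem = proc.memory_percent()                     # resident memory as % of total RAM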
Example #49
        elif 'USER' in argc:
            USER = argc.split('USER=')[-1]
        elif 'PASSWORD' in argc:
            PASSWORD = argc.split('PASSWORD=')[-1]
        elif 'INTERVAL' in argc:
            INTERVAL = int(argc.split('INTERVAL=')[-1])
    socket.setdefaulttimeout(30)
    get_realtime_date()
    while True:
        if True:
            timer = 0
            while True:
                CPU = get_cpu()
                NET_IN, NET_OUT = liuliang()
                Uptime = get_uptime()
                Load_1, Load_5, Load_15 = os.getloadavg()
                MemoryTotal, MemoryUsed, SwapTotal, SwapFree = get_memory()
                HDDTotal, HDDUsed = get_hdd()
                IP_STATUS = ip_status()

                array = {}
                if not timer:
                    array['online4'] = get_network(4)
                    array['online6'] = get_network(6)
                    timer = 10
                else:
                    timer -= 1*INTERVAL

                array['uptime'] = Uptime
                array['load_1'] = Load_1
                array['load_5'] = Load_5
Example #50
#!/usr/bin/python3
import os, sys, threading, requests
#1
print("PID is", os.getpid())
#2
if (sys.platform == "linux"):
    print("load average is ", os.getloadavg())
#4
arr = [
    "https://api.github.com", "http://bilgisayar.mu.edu.tr/",
    "https://www.python.org/", "http://akrepnalan.com/ceng2034",
    "https://github.com/caesarsalad/wow"
]


def request(url):
    res = requests.get(url)
    res_code = res.status_code

    if (200 <= res_code <= 300):
        print(url + " URL is valid")
    else:
        print(url + " URL is invalid")


for i in range(0, 5):
    thread1 = threading.Thread(target=request, args=(arr[i], ))
    thread1.start()
#3
load_avg = os.getloadavg()
print("Cpu count is ", os.cpu_count())
Example #51
            rx_buf[index] = net.bytes_recv + net_rx_offset

            prev_index = index
            index = (index + 1) % REPORTING_CADENCE

            # It's time to file a status update? If so, first, gather bits of
            # information that don't need to be averaged over time. Some of these
            # shouldn't change between boots, but the whole point of M&C is to be
            # sure ...

            if index == 0:
                hostname = socket.gethostname()
                ip_address = get_ip_address()
                system_time = Time.now()
                num_cores = os.sysconf('SC_NPROCESSORS_ONLN')
                cpu_load_pct = os.getloadavg()[1] / num_cores * 100.
                uptime_days = (time.time() - psutil.boot_time()) / 86400.

                memory_size_gb = vmem.total / 1024**3  # bytes => GiB

                # We only track disk usage on the root filesystem partition. We could
                # potentially use `psutil.disk_partitions(all=False)` to try to track
                # all physical disks. But the most important non-root disks to monitor
                # are the pots, and the Librarian reports their status to M&C through
                # specialized channels.

                disk = psutil.disk_usage('/')
                disk_size_gb = disk.total / 1024**3  # bytes => GiB
                disk_space_pct = disk.percent  # note, this is misnamed a bit - it's the % used

                # Compute the longer averages. We have advanced `index` and
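The cpu_load_pct line above turns the 5-minute average into a rough utilisation percentage by dividing by the number of online processors. A portable variant of the same idea, using os.cpu_count() instead of the Unix-only os.sysconf key (function name is illustrative):

import os

def load_percent(index=1):
    """Load average (5-minute by default) as a percentage of the available cores."""
    cores = os.cpu_count() or 1
    return os.getloadavg()[index] / cores * 100.0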
Example #52
    async def top(self, ctx, show_processes: bool = True):
        """Snapshot of real-time system information and tasks"""

        # sleep some time
        psutil.cpu_percent(interval=None, percpu=True)
        await asyncio.sleep(1)
        procs = []
        procs_status = {}
        for p in psutil.process_iter():
            try:
                p.dict = p.as_dict([
                    'username', 'nice', 'memory_info', 'memory_percent',
                    'cpu_percent', 'cpu_times', 'name', 'status'
                ])
                try:
                    procs_status[p.dict['status']] += 1
                except KeyError:
                    procs_status[p.dict['status']] = 1
            except psutil.NoSuchProcess:
                pass
            else:
                procs.append(p)

        # return processes sorted by CPU percent usage
        processes = sorted(procs,
                           key=lambda p: p.dict['cpu_percent'],
                           reverse=True)

        # Print system-related info, above the process list
        msg = ""
        num_procs = len(procs)

        def get_dashes(perc):
            dashes = "|" * int((float(perc) / 10 * 4))
            empty_dashes = " " * (40 - len(dashes))
            return dashes, empty_dashes

        # cpu usage
        percs = psutil.cpu_percent(interval=0, percpu=True)
        for cpu_num, perc in enumerate(percs):
            dashes, empty_dashes = get_dashes(perc)
            msg += " CPU{0:<2} [{1}{2}] {3:>5}%\n".format(
                cpu_num, dashes, empty_dashes, perc)
        mem = psutil.virtual_memory()
        dashes, empty_dashes = get_dashes(mem.percent)
        msg += " Mem   [{0}{1}] {2:>5}% {3:>6} / {4}\n".format(
            dashes, empty_dashes, mem.percent,
            str(int(mem.used / 1024 / 1024)) + "M",
            str(int(mem.total / 1024 / 1024)) + "M")

        # swap usage
        swap = psutil.swap_memory()
        dashes, empty_dashes = get_dashes(swap.percent)
        msg += " Swap  [{0}{1}] {2:>5}% {3:>6} / {4}\n".format(
            dashes, empty_dashes, swap.percent,
            str(int(swap.used / 1024 / 1024)) + "M",
            str(int(swap.total / 1024 / 1024)) + "M")

        # processes number and status
        st = []
        for x, y in procs_status.items():
            if y:
                st.append("%s=%s" % (x, y))
        st.sort(key=lambda x: x[:3] in ('run', 'sle'), reverse=True)
        msg += " Processes: {0} ({1})\n".format(num_procs, ', '.join(st))
        # load average, uptime
        uptime = datetime.datetime.now() - datetime.datetime.fromtimestamp(
            psutil.boot_time())
        if not hasattr(os, "getloadavg"):
            msg += " Load average: N/A  Uptime: {0}".format(
                str(uptime).split('.')[0])
        else:
            av1, av2, av3 = os.getloadavg()
            msg += " Load average: {0:.2f} {1:.2f} {2:.2f}  Uptime: {3}".format(
                av1, av2, av3,
                str(uptime).split('.')[0])
        await self._say(ctx, msg)

        # print processes
        if show_processes:
            template = "{0:<6} {1:<9} {2:>5} {3:>8} {4:>8} {5:>8} {6:>6} {7:>10}  {8:>2}\n"
            msg = template.format("PID", "USER", "NI", "VIRT", "RES", "CPU%",
                                  "MEM%", "TIME+", "NAME")
            for p in processes:
                # TIME+ column shows process CPU cumulative time and it
                # is expressed as: "mm:ss.ms"
                if p.dict['cpu_times'] is not None:
                    ctime = datetime.timedelta(
                        seconds=sum(p.dict['cpu_times']))
                    ctime = "%s:%s.%s" % (ctime.seconds // 60 % 60,
                                          str((ctime.seconds % 60)).zfill(2),
                                          str(ctime.microseconds)[:2])
                else:
                    ctime = ''
                if p.dict['memory_percent'] is not None:
                    p.dict['memory_percent'] = round(p.dict['memory_percent'],
                                                     1)
                else:
                    p.dict['memory_percent'] = ''
                if p.dict['cpu_percent'] is None:
                    p.dict['cpu_percent'] = ''
                if p.dict['username']:
                    username = p.dict['username'][:8]
                else:
                    username = ''
                msg += template.format(
                    p.pid, username, p.dict['nice'] or '',
                    self._size(getattr(p.dict['memory_info'], 'vms', 0)),
                    self._size(getattr(p.dict['memory_info'], 'rss',
                                       0)), p.dict['cpu_percent'],
                    p.dict['memory_percent'], ctime, p.dict['name'] or '')
            await self._say(ctx, msg)
        return
Example #53
#!/usr/bin/python3
import os
import psutil
import socket
import time

TARGET_IP = "10.0.13.50"
TARGET_PORT = 1234
SLEEP = 1
LOAD_INDEX = 1  # 0=load1, 1=load5, 2=load15

while True:
    load = os.getloadavg()[LOAD_INDEX]
    net_io = psutil.net_io_counters(pernic=False)
    download = net_io.bytes_recv
    upload = net_io.bytes_sent

    time.sleep(SLEEP)

    net_io = psutil.net_io_counters(pernic=False)
    download = net_io.bytes_recv - download
    upload = net_io.bytes_sent - upload

    out = ",".join(map(lambda x: str(round(x, 3)), [load, download, upload]))
    print(out)
    socket.socket(socket.AF_INET,
                  socket.SOCK_DGRAM).sendto(out.encode("utf-8"),
                                            (TARGET_IP, TARGET_PORT))
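Example #53 only shows the sending side: one "load,download,upload" line per second over UDP. A minimal, hypothetical receiver for that wire format could look like this (port 1234 mirrors TARGET_PORT above):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", 1234))
while True:
    data, addr = sock.recvfrom(1024)
    load, download, upload = (float(x) for x in data.decode("utf-8").split(","))
    print(f"{addr[0]}: load={load} down={download} B/s up={upload} B/s")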
Example #54
    def status(request):
        """
        Returns a variety of information about both
        Cuckoo and the operating system.
        :return: Dictionary
        """
        # In order to keep track of the diskspace statistics of the temporary
        # directory we create a temporary file so we can statvfs() on that.
        temp_file = Files.temp_put("")

        paths = dict(
            binaries=cwd("storage", "binaries"),
            analyses=cwd("storage", "analyses"),
            temporary=os.path.dirname(temp_file),
        )

        diskspace = {}
        for key, path in paths.items():
            if hasattr(os, "statvfs") and os.path.isdir(path):
                stats = os.statvfs(path)
                diskspace[key] = dict(
                    free=stats.f_bavail * stats.f_frsize,
                    total=stats.f_blocks * stats.f_frsize,
                    used=(stats.f_blocks - stats.f_bavail) * stats.f_frsize,
                )

        # Now we remove the temporary file and its parent directory.
        os.unlink(temp_file)

        # Get the CPU load.
        if hasattr(os, "getloadavg"):
            cpuload = os.getloadavg()
        else:
            cpuload = []

        try:
            cpucount = multiprocessing.cpu_count()
        except NotImplementedError:
            cpucount = 1

        if os.path.isfile("/proc/meminfo"):
            values = {}
            for line in open("/proc/meminfo"):
                key, value = line.split(":", 1)
                values[key.strip()] = value.replace("kB", "").strip()

            if "MemAvailable" in values and "MemTotal" in values:
                memavail = int(values["MemAvailable"])
                memtotal = int(values["MemTotal"])
                memory = 100 - 100.0 * memavail / memtotal
            else:
                memory = memavail = memtotal = None
        else:
            memory = memavail = memtotal = None

        data = dict(
            version=version,
            hostname=socket.gethostname(),
            machines=dict(total=len(db.list_machines()),
                          available=db.count_machines_available()),
            tasks=dict(total=db.count_tasks(),
                       pending=db.count_tasks("pending"),
                       running=db.count_tasks("running"),
                       completed=db.count_tasks("completed"),
                       reported=db.count_tasks("reported")),
            diskspace=diskspace,
            cpucount=cpucount,
            cpuload=cpuload,
            memory=memory,
            memavail=memavail,
            memtotal=memtotal,
        )

        return JsonResponse({"status": True, "data": data})
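The /proc/meminfo parsing above is Linux-only. Where psutil is already available, the same three memory values can be derived portably; an illustrative equivalent (not Cuckoo's code), keeping the kB units of /proc/meminfo:

import psutil

vm = psutil.virtual_memory()
memory = vm.percent              # percentage of memory in use
memavail = vm.available // 1024  # bytes -> kB
memtotal = vm.total // 1024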
Example #55
 def update(self):
     """Get the latest system information."""
     import psutil
     if self.type == 'disk_use_percent':
         self._state = psutil.disk_usage(self.argument).percent
     elif self.type == 'disk_use':
         self._state = round(psutil.disk_usage(self.argument).used /
                             1024**3, 1)
     elif self.type == 'disk_free':
         self._state = round(psutil.disk_usage(self.argument).free /
                             1024**3, 1)
     elif self.type == 'memory_use_percent':
         self._state = psutil.virtual_memory().percent
     elif self.type == 'memory_use':
         self._state = round((psutil.virtual_memory().total -
                              psutil.virtual_memory().available) /
                             1024**2, 1)
     elif self.type == 'memory_free':
         self._state = round(psutil.virtual_memory().available / 1024**2, 1)
     elif self.type == 'swap_use_percent':
         self._state = psutil.swap_memory().percent
     elif self.type == 'swap_use':
         self._state = round(psutil.swap_memory().used / 1024**3, 1)
     elif self.type == 'swap_free':
         self._state = round(psutil.swap_memory().free / 1024**3, 1)
     elif self.type == 'processor_use':
         self._state = round(psutil.cpu_percent(interval=None))
     elif self.type == 'process':
         if any(self.argument in l.name() for l in psutil.process_iter()):
             self._state = STATE_ON
         else:
             self._state = STATE_OFF
     elif self.type == 'network_out' or self.type == 'network_in':
         counters = psutil.net_io_counters(pernic=True)
         if self.argument in counters:
             counter = counters[self.argument][IO_COUNTER[self.type]]
             self._state = round(counter / 1024**2, 1)
         else:
             self._state = STATE_UNKNOWN
     elif self.type == 'packets_out' or self.type == 'packets_in':
         counters = psutil.net_io_counters(pernic=True)
         if self.argument in counters:
             self._state = counters[self.argument][IO_COUNTER[self.type]]
         else:
             self._state = STATE_UNKNOWN
     elif self.type == 'ipv4_address' or self.type == 'ipv6_address':
         addresses = psutil.net_if_addrs()
         if self.argument in addresses:
             self._state = addresses[self.argument][IF_ADDRS[self.type]][1]
         else:
             self._state = STATE_UNKNOWN
     elif self.type == 'last_boot':
         self._state = dt_util.as_local(
             dt_util.utc_from_timestamp(psutil.boot_time())
         ).date().isoformat()
     elif self.type == 'since_last_boot':
         self._state = dt_util.utcnow() - dt_util.utc_from_timestamp(
             psutil.boot_time())
     elif self.type == 'load_1m':
         self._state = os.getloadavg()[0]
     elif self.type == 'load_5m':
         self._state = os.getloadavg()[1]
     elif self.type == 'load_15m':
         self._state = os.getloadavg()[2]
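The three load_* branches each call os.getloadavg() and pick one element. The same dispatch can be written once with an index map; a small illustrative refactor (not the upstream Home Assistant code):

import os

LOAD_INDEX = {'load_1m': 0, 'load_5m': 1, 'load_15m': 2}

def load_state(sensor_type):
    return os.getloadavg()[LOAD_INDEX[sensor_type]]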
Example #56
 def getloadavg_1m():
     return os.getloadavg()[0]
def get_load():
    try:
        return os.getloadavg()[0]
    except:
        return -1.0
#!/usr/bin/python3

import os
import requests
import sys
import hashlib
import uuid
import time
import multiprocessing
from multiprocessing import Pool


start_time=time.time()

print("There are %d CPUs on this machine" % multiprocessing.cpu_count())
print("Load avg:", os.getloadavg())


url = ["http://wiki.netseclab.mu.edu.tr/images/thumb/f/f7/MSKU-BlockchainResearchGroup.jpeg/300px-MSKU-BlockchainResearchGroup.jpeg",
"https://upload.wikimedia.org/wikipedia/tr/9/98/Mu%C4%9Fla_S%C4%B1tk%C4%B1_Ko%C3%A7man_%C3%9Cniversitesi_logo.png",
"https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Hawai%27i.jpg/1024px-Hawai%27i.jpg",
"http://wiki.netseclab.mu.edu.tr/images/thumb/f/f7/MSKU-BlockchainResearchGroup.jpeg/300px-MSKU-BlockchainResearchGroup.jpeg",
"https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Hawai%27i.jpg/1024px-Hawai%27i.jpg"]

def child_process(url): 

	pid = os.fork() 

	if pid > 0: 
		print("Parent process and pid is : ", os.getpid()) 
		#avoid the orphan process
 def run(self):
     while True:
         cpu_avg = os.getloadavg()[0]
         dht.ip_to_cpu[sys.argv[1]] = cpu_avg
         time.sleep(5)
Example #60
    def sysdata(self):
        sys = {"max_used_percent": 0, "temp_unit": self.temp_unit}

        if self.init["cpu_freq"]:
            cpu_freqs = self._calc_cpu_freqs(
                self._get_cpuinfo(), self.cpu_freq_unit, self.init["cpu_freq"]
            )
            cpu_freq_keys = ["cpu_freq_avg", "cpu_freq_max"]
            sys.update(zip(cpu_freq_keys, cpu_freqs))

        if self.init["stat"]:
            stat = self._get_stat()

            if self.init["cpu_percent"]:
                cpu = self._filter_stat(stat, avg=True)
                sys["cpu_used_percent"] = self._calc_cpu_percent(cpu)

            if self.init["cpu_per_core"]:
                cpu_keys = ["name", "used_percent"]
                new_cpu = []
                for cpu in self._filter_stat(stat):
                    cpu = dict(zip(cpu_keys, [cpu[0], self._calc_cpu_percent(cpu)]))
                    for x in self.thresholds_init["format_cpu"]:
                        if x in cpu:
                            self.py3.threshold_get_color(cpu[x], x)
                    new_cpu.append(self.py3.safe_format(self.format_cpu, cpu))

                format_cpu_separator = self.py3.safe_format(self.format_cpu_separator)
                format_cpu = self.py3.composite_join(format_cpu_separator, new_cpu)
                sys["format_cpu"] = format_cpu

        if self.init["cpu_temp"]:
            sys["cpu_temp"] = self._get_cputemp(self.zone, self.temp_unit)

        if self.init["load"]:
            load_keys = ["load1", "load5", "load15"]
            sys.update(zip(load_keys, getloadavg()))

        if self.init["meminfo"]:
            meminfo = self._get_meminfo()

            if self.init["mem"]:
                mem = self._calc_mem_info(self.mem_unit, meminfo, True)
                mem_keys = [
                    "mem_total",
                    "mem_total_unit",
                    "mem_used",
                    "mem_used_unit",
                    "mem_used_percent",
                    "mem_free",
                    "mem_free_unit",
                    "mem_free_percent",
                ]
                sys.update(zip(mem_keys, mem))

            if self.init["swap"]:
                swap = self._calc_mem_info(self.swap_unit, meminfo, False)
                swap_keys = [
                    "swap_total",
                    "swap_total_unit",
                    "swap_used",
                    "swap_used_unit",
                    "swap_used_percent",
                    "swap_free",
                    "swap_free_unit",
                    "swap_free_percent",
                ]
                sys.update(zip(swap_keys, swap))

        sys["max_used_percent"] = max(
            [perc for name, perc in sys.items() if "used_percent" in name]
        )

        for x in self.thresholds_init["format"]:
            if x in sys:
                self.py3.threshold_get_color(sys[x], x)
            elif x in self.thresholds_init["legacy"]:
                y = self.thresholds_init["legacy"][x]
                if y in sys:
                    self.py3.threshold_get_color(sys[y], x)

        self.first_run = False

        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(self.format, sys),
        }
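Example #60 gathers CPU, stat and memory data through its own _get_* helpers but takes the three load figures from getloadavg(). On Linux the same numbers are also exposed in /proc/loadavg, which additionally reports the runnable/total task counts; a short sketch of that alternative (field layout per proc(5)):

def read_proc_loadavg(path="/proc/loadavg"):
    with open(path) as f:
        fields = f.read().split()
    load1, load5, load15 = (float(x) for x in fields[:3])
    running, total = (int(x) for x in fields[3].split("/"))
    return {"load1": load1, "load5": load5, "load15": load15,
            "running": running, "total": total}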