def cpu_info():
    """Return CPU data."""
    return {
        'count': psutil.cpu_count(logical=False),
        'logical': psutil.cpu_count()
    }
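# A minimal usage sketch (hypothetical, not part of the snippet above): note
# that psutil.cpu_count(logical=False) can return None on platforms where the
# physical core count cannot be determined, so callers may want a fallback.
import psutil

info = {
    'count': psutil.cpu_count(logical=False) or psutil.cpu_count(),
    'logical': psutil.cpu_count(),
}
print(info)  # e.g. {'count': 4, 'logical': 8}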
def __init__(self, nthreads=psutil.cpu_count(logical=True), nwalkers=25*psutil.cpu_count(logical=True), nsteps=250, maxEvals=10000, xTol=0.01, mcmcA=2.0): try: assert nthreads > 0, r'nthreads must be greater than 0' assert isinstance(nthreads, int), r'nthreads must be an integer' assert nwalkers > 0, r'nwalkers must be greater than 0' assert isinstance(nwalkers, int), r'nwalkers must be an integer' assert nsteps > 0, r'nsteps must be greater than 0' assert isinstance(nsteps, int), r'nsteps must be an integer' assert maxEvals > 0, r'maxEvals must be greater than 0' assert isinstance(maxEvals, int), r'maxEvals must be an integer' assert xTol > 0.0, r'xTol must be greater than 0' assert isinstance(xTol, float), r'xTol must be a float' self._ndims = self.r self._nthreads = nthreads self._nwalkers = nwalkers self._nsteps = nsteps self._maxEvals = maxEvals self._xTol = xTol self._mcmcA = mcmcA self._Chain = np.require( np.zeros(self._ndims*self._nwalkers*self._nsteps), requirements=['F', 'A', 'W', 'O', 'E']) self._LnPrior = np.require( np.zeros(self._nwalkers*self._nsteps), requirements=['F', 'A', 'W', 'O', 'E']) self._LnLikelihood = np.require( np.zeros(self._nwalkers*self._nsteps), requirements=['F', 'A', 'W', 'O', 'E']) self._taskCython = MBHBTask_cython.MBHBTask_cython(self._nthreads) self._pDIC = None self._dic = None except AssertionError as err: raise AttributeError(str(err))
def util_cpu_status():
    """Report per-core CPU utilization status.

    Classifies each logical core as Critical (>= 90%), Warning (75-89%)
    or OK (< 75%) based on a one-second sample.
    """
    Number_of_CPUs = str(psutil.cpu_count())
    Number_of_Physical_CPUs = str(psutil.cpu_count(logical=False))
    iu = psutil.cpu_percent(interval=1, percpu=True)
    m = []
    for c, cn in enumerate(iu):
        if cn >= 90:
            status = "Status Critical. Core " + str(c)
        elif cn >= 75:
            status = "Status Warning. Core " + str(c)
        else:
            status = "Status OK. Core " + str(c)
        m.append(status)
    return ", ".join(m)
def performance(attribute):
    """ A little smarter routing system. """
    data = None
    if attribute == 'system':
        data = {'system': platform.system()}
    elif attribute == 'processor':
        data = {'processor': platform.processor()}
    elif attribute in ['cpu_count', 'cpucount']:
        data = {'cpu_count': psutil.cpu_count()}
    elif attribute == 'machine':
        data = {'machine': platform.machine()}
    elif attribute in ['virtual_mem', 'virtualmem']:
        data = {'virtual_mem': psutil.virtual_memory().total}
    elif attribute in ['virtual_mem_gb', 'virtualmemgb']:
        data = {'virtual_mem_gb': psutil.virtual_memory().total / (1024.0 ** 3)}
    elif attribute == 'all':
        data = {
            'system': platform.system(),
            'processor': platform.processor(),
            'cpu_count': psutil.cpu_count(),
            'machine': platform.machine(),
            'virtual_mem': psutil.virtual_memory().total,
            'virtual_mem_gb': psutil.virtual_memory().total / (1024.0 ** 3),
        }
    packet = json.dumps(data)
    resp = Response(packet, status=200, mimetype='application/json')
    return resp
def CPU(self):
    # Note: the `commands` module is Python 2 only; `cpu_time` and
    # `cpu_physical_nums` are gathered but not included in the returned JSON.
    cpu_label = str(commands.getoutput('grep "model name" /proc/cpuinfo | awk -F ": " \'{print $2}\' | head -1'))
    cpu_cache = str(commands.getoutput('grep "cache size" /proc/cpuinfo|uniq|awk \'{print $4,$5}\''))
    cpu_time = psutil.cpu_times()
    cpu_logical_nums = psutil.cpu_count()
    cpu_physical_nums = psutil.cpu_count(logical=False)
    return json.dumps({"Label": str(cpu_label),
                       "Logical": int(cpu_logical_nums),
                       "Cache_size": str(cpu_cache)})
def check(self, agentConfig): try: self.wmi_sampler.sample() except TimeoutException: self.logger.warning( u"Timeout while querying Win32_PerfRawData_PerfOS_Processor WMI class." u" CPU metrics will be returned at next iteration." ) return if not (len(self.wmi_sampler)): self.logger.info('Missing Win32_PerfRawData_PerfOS_Processor WMI class.' ' No CPU metrics will be returned') return cpu_interrupt = self._average_metric(self.wmi_sampler, 'PercentInterruptTime') if cpu_interrupt is not None: self.save_sample('system.cpu.interrupt', cpu_interrupt) cpu_percent = psutil.cpu_times() self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.cpu_count()) self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.cpu_count()) self.save_sample('system.cpu.system', 100 * cpu_percent.system / psutil.cpu_count()) return self.get_metrics()
def update(self):
    """Update core stats.

    Stats is a dict (with both physical and logical CPU counts)
    instead of an integer.
    """
    # Reset the stats
    self.reset()

    if self.input_method == 'local':
        # Update stats using the standard system lib
        # psutil 2.0 and later provide psutil.cpu_count() and
        # psutil.cpu_count(logical=False)
        # Return a dict with:
        # - phys: physical cores only (hyper-threaded CPUs are excluded)
        # - log: logical CPUs in the system
        # Return None if undefined
        try:
            self.stats["phys"] = psutil.cpu_count(logical=False)
            self.stats["log"] = psutil.cpu_count()
        except NameError:
            self.reset()
    elif self.input_method == 'snmp':
        # Update stats using SNMP
        # http://stackoverflow.com/questions/5662467/how-to-find-out-the-number-of-cpus-using-snmp
        pass

    return self.stats
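# A minimal sketch (hypothetical helper, not part of the snippet above):
# psutil.cpu_count() and psutil.cpu_count(logical=False) may return None when
# the count cannot be determined, so a caller can normalise both values.
import psutil

def cpu_core_counts(default=1):
    """Return (physical, logical) core counts, falling back to `default`."""
    physical = psutil.cpu_count(logical=False) or default
    logical = psutil.cpu_count() or physical
    return physical, logical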
def _insert_node_status(self): if 'PBS_JOBID' in os.environ: job_id = os.environ['PBS_JOBID'] else: job_id = None with scinetutil.transaction(self._conn, self._must_exit, False) as trx: query = '''INSERT INTO nodes ( hostname, process_id, job_id, physical_cpus, logical_cpus, created_at, last_updated ) VALUES (%s, %s, %s, %s, %s, NOW(), NOW()) RETURNING id ''' trx.execute(query, ( socket.gethostname(), os.getpid(), job_id, psutil.cpu_count(logical=False), psutil.cpu_count(logical=True), )) node_id = trx.fetchone()[0] return node_id
def mch_cpu(): """ This function will gather cpu information. Needs root permission. Parameters: none return: dictionary """ uid = os.getuid() proc = subprocess.Popen('lscpu', stdout=subprocess.PIPE) output = proc.stdout.read() outlist = output.split('\n') num_thrds = int(filter(str.isdigit, outlist[5])) model = outlist[0].split(' ')[-1] mod_name = outlist[12][23:] speed = outlist[14].split(' ')[-1] if (uid == 0): proc2 = subprocess.Popen(["dmidecode", "-s", "system-serial-number"], stdout=subprocess.PIPE, stdin=subprocess.PIPE) (out, err) = proc2.communicate() cpu_info = {"num_cpus":psutil.cpu_count() , "num_cores_per_cpu":psutil.cpu_count()/ psutil.cpu_count(logical=False), "num_threads_per_core":num_thrds, "cpu_model":model, "cpu_model_name":mod_name, "cpu_speed":speed, "serial_number":out} elif (uid != 0 ): cpu_info = {"num_cpus":psutil.cpu_count() , "num_cores_per_cpu":psutil.cpu_count()/ psutil.cpu_count(logical=False), "num_threads_per_core":num_thrds, "cpu_model":model, "cpu_model_name":mod_name, "cpu_speed":speed, "serial_number":"Need Root!"} return cpu_info
async def _set_vcpus_ram(self, vcpus, ram): """ Set the number of vCPU cores and amount of RAM for the GNS3 VM. :param vcpus: number of vCPU cores :param ram: amount of RAM """ # memory must be a multiple of 4 (VMware requirement) if ram % 4 != 0: raise GNS3VMError("Allocated memory {} for the GNS3 VM must be a multiple of 4".format(ram)) available_vcpus = psutil.cpu_count(logical=True) if vcpus > available_vcpus: raise GNS3VMError("You have allocated too many vCPUs for the GNS3 VM! (max available is {} vCPUs)".format(available_vcpus)) try: pairs = VMware.parse_vmware_file(self._vmx_path) if vcpus > 1: pairs["numvcpus"] = str(vcpus) cores_per_sockets = int(vcpus / psutil.cpu_count(logical=False)) if cores_per_sockets > 1: pairs["cpuid.corespersocket"] = str(cores_per_sockets) pairs["memsize"] = str(ram) VMware.write_vmx_file(self._vmx_path, pairs) log.info("GNS3 VM vCPU count set to {} and RAM amount set to {}".format(vcpus, ram)) except OSError as e: raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e))
def cpu_num(self, ret, proc):
    self.assertIsInstance(ret, int)
    if FREEBSD and ret == -1:
        return
    self.assertGreaterEqual(ret, 0)
    if psutil.cpu_count() == 1:
        self.assertEqual(ret, 0)
    self.assertIn(ret, list(range(psutil.cpu_count())))
def get_cup_info():
    global fg
    print "\033[31m%sCPU status information%s\033[0m" % (fg*30, fg*30)
    print "CPU logical count: %s" % psutil.cpu_count()
    print "CPU physical count: %s" % psutil.cpu_count(logical=False)
    print "CPU user-space usage percent: %s" % (psutil.cpu_times_percent(interval=1).user) + "%"
    print "CPU kernel-space usage percent: %s" % (psutil.cpu_times_percent(interval=1).system) + "%"
    print "CPU idle percent: %s" % (psutil.cpu_times_percent(interval=1).idle) + "%"
def __init__(self):
    super(CPUInfo, self).__init__()
    self._total = psutil.cpu_count()
    self._physical = psutil.cpu_count(logical=False)
    self["total"] = self._total
    self["physical"] = self._physical
    self["usage"] = psutil.cpu_percent(interval=1, percpu=True)
def check(self, agentConfig):
    cpu_percent = psutil.cpu_times()
    self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.cpu_count())
    self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.cpu_count())
    self.save_sample('system.cpu.system', 100 * cpu_percent.system / psutil.cpu_count())
    self.save_sample('system.cpu.interrupt', 100 * cpu_percent.interrupt / psutil.cpu_count())
    return self.get_metrics()
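# A hedged alternative sketch (not the agent's actual code): psutil can return
# user/system/idle percentages directly, already normalized across all CPUs,
# instead of dividing raw cpu_times by the CPU count as the checks above do.
import psutil

pct = psutil.cpu_times_percent(interval=1)
samples = {
    'system.cpu.user': pct.user,
    'system.cpu.system': pct.system,
    'system.cpu.idle': pct.idle,
}
print(samples)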
def _getdevices(self):
    if self.ncpu > self.maxcpu:
        raise ValueError('The ncpu ({}) cannot be greater than the maxcpu ({})'.format(self.ncpu, self.maxcpu))
    if self.maxcpu > psutil.cpu_count():
        logger.warning('maxcpu ({}) higher than the total amount of CPU threads available ({}). '
                       'Overclocking.'.format(self.maxcpu, psutil.cpu_count()))
    devices = int(self.maxcpu / self.ncpu)
    logger.info("Using {} CPU \"devices\" ({} / {})".format(devices, self.maxcpu, self.ncpu))
    return [None] * devices
def cpu():
    cpu_times_percent = psutil.cpu_times_percent()
    cpu_times = psutil.cpu_times()
    text = u"Logical CPU count: %d\n" % psutil.cpu_count()
    text = text + (u"Physical CPU count: %d\n" % psutil.cpu_count(logical=False))
    text = text + (u'CPU times: [user: %s kernel: %s idle: %s]\n' %
                   (cpu_times.user, cpu_times.system, cpu_times.idle))
    text = text + (u'Current CPU usage: [user: %s%% kernel: %s%% idle: %s%%]\n' %
                   (cpu_times_percent.user, cpu_times_percent.system, cpu_times_percent.idle))
    text = text + (u'Current total CPU usage: %s%%' % psutil.cpu_percent())
    sysinfo.label2.setText(sysinfo.tr(text))
def CPU(): cpu_info = {} cpu_info['PROCCESS_COUNT'] = str(psutil.cpu_count(logical=False)) cpu_info['THREAD_COUNT'] = str(psutil.cpu_count()) save_rsc_file('CPU_INFO', [cpu_info,], False) cpu_list = [] for cpu_rsc_item in psutil.cpu_times_percent(interval=None, percpu=True): cpu_rsc_data = {} cpu_rsc_data['TOTAL_USERATE'] = cpu_rsc_item.user + cpu_rsc_item.system cpu_rsc_data['USER_USERATE'] = cpu_rsc_item.user cpu_rsc_data['SYSTEM_USERATE'] = cpu_rsc_item.system cpu_rsc_data['IDLE_TIME'] = cpu_rsc_item.idle try: cpu_rsc_data['NICE'] = cpu_rsc_item.nice except: pass try: cpu_rsc_data['IO_WAIT'] = cpu_rsc_item.iowait except: pass try: cpu_rsc_data['IRQ'] = cpu_rsc_item.irq except: pass try: cpu_rsc_data['SOFTIRQ'] = cpu_rsc_item.softirq except: pass try: cpu_rsc_data['STEAL'] = cpu_rsc_item.steal except: pass try: cpu_rsc_data['GUEST'] = cpu_rsc_item.guest except: pass try: cpu_rsc_data['GUEST_NICE'] = cpu_rsc_item.guest_nice except: pass cpu_list.append(cpu_rsc_data) save_rsc_file('CPU', cpu_list, True)
def _register_worker(redis, worker_uuid):
    redis.hmset(get_worker_hash_key(worker_uuid), {
        'hostname': gethostname(),
        'pid': os.getpid(),
        'cpu_cores': psutil.cpu_count(logical=False),
        'cpu_threads': psutil.cpu_count(logical=True),
        'virtual-mem': psutil.virtual_memory(),
        'swap-mem': psutil.swap_memory(),
        'registered': strftime(datetime.utcnow())
    })
def proc_cpuinfo(self):
    """ cat /proc/cpuinfo """
    self.meta['processor']['cpus'] = psutil.cpu_count(logical=False)
    self.meta['processor']['cores'] = psutil.cpu_count()
    proc_cpuinfo_out, _ = subp.call('sysctl hw.model')
    for line in proc_cpuinfo_out:
        kv = re.match(self.proc_cpuinfo_re, line)
        if kv:
            key, value = kv.group(1), kv.group(2)
            if key.startswith('hw.model'):
                self.meta['processor']['model'] = value
def get_cpu(self):
    info = psutil.cpu_times()
    res = dict(
        user=info.user,          # time spent running user processes
        system=info.system,      # time spent running kernel code and interrupts
        iowait=info.iowait,      # time the CPU was idle waiting for I/O
        idle=info.idle,          # time the CPU spent idle
        cpucount1=psutil.cpu_count(),              # number of logical CPUs
        cpucount2=psutil.cpu_count(logical=False)  # number of physical CPUs
    )
    print res
def test_cpu_count_none(self):
    # https://github.com/giampaolo/psutil/issues/1085
    for val in (-1, 0, None):
        with mock.patch('psutil._psplatform.cpu_count_logical',
                        return_value=val) as m:
            self.assertIsNone(psutil.cpu_count())
            assert m.called
        with mock.patch('psutil._psplatform.cpu_count_physical',
                        return_value=val) as m:
            self.assertIsNone(psutil.cpu_count(logical=False))
            assert m.called
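# As the test above shows, cpu_count() can return None, so sizing code should
# guard before using it arithmetically. A hedged sketch (hypothetical, not
# from psutil itself): choosing a process-pool size with a safe fallback.
import multiprocessing
import psutil

if __name__ == "__main__":
    workers = max(1, psutil.cpu_count(logical=False) or psutil.cpu_count() or 1)
    with multiprocessing.Pool(processes=workers) as pool:
        print(pool.map(abs, [-1, -2, -3]))  # [1, 2, 3]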
def _init_cpu(self): if platform.system().lower() == 'windows': self.cpu_name = platform.processor() elif platform.system().lower() == 'linux': all_info = str(subprocess.check_output('cat /proc/cpuinfo', shell=True).strip()) for line in all_info.split('\\n'): if 'model name' in line: self.cpu_name = re.sub(r'.*model name.*:', '', line, 1) self.cpu_count = psutil.cpu_count(logical=False) self.cpu_logical_count = psutil.cpu_count() self.cpu_load_stats = [['time'] + ['Core #' + str(i) for i in range(self.cpu_logical_count)]] self.cpu_temp_stats = [['time', 'CPU temperature']]
def __init__(self):
    self.nic = 'eth0'
    self.disk_part = '/'
    self.pid = '0'
    self.cpulcnt = ps.cpu_count(logical=True)
    self.cpupcnt = ps.cpu_count(logical=False)
    self.cpupercent = ps.cpu_times_percent(percpu=False)
    self.memtotal = ps.virtual_memory()
    self.diskconf = ps.disk_partitions()
    self.diskio = ps.disk_io_counters(perdisk=False)
    self.diskio2 = ps.disk_io_counters(perdisk=True)
    self.swap = ps.swap_memory()
    self.netio_part = ps.net_io_counters(pernic=True)
def mch_cpu():
    proc = subprocess.Popen('lscpu', stdout=subprocess.PIPE)
    output = proc.stdout.read()
    outlist = output.split('\n')
    num_thrds = int(filter(str.isdigit, outlist[5]))
    model = outlist[0].split(' ')[-1]
    mod_name = outlist[12][23:]
    speed = outlist[14].split(' ')[-1]
    proc2 = subprocess.Popen(["dmidecode", "-s", "system-serial-number"],
                             stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    (out, err) = proc2.communicate()
    cpu_info = {"num_cpus": psutil.cpu_count(),
                "num_cores_per_cpu": psutil.cpu_count() / psutil.cpu_count(logical=False),
                "num_threads_per_core": num_thrds,
                "cpu_model": model,
                "cpu_model_name": mod_name,
                "cpu_speed": speed,
                "serial_number": out}
    return cpu_info
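# A hedged sketch of the same ratio computed purely from psutil (hypothetical,
# not from the snippet above): the logical count divided by the physical count
# gives hardware threads per core rather than cores per socket, so the label
# used for this ratio matters.
import psutil

physical = psutil.cpu_count(logical=False) or 1
logical = psutil.cpu_count() or physical
threads_per_core = logical // physical
print(threads_per_core)  # typically 2 with hyper-threading enabled, else 1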
def test_cpu_count(self): logical = psutil.cpu_count() self.assertEqual(logical, len(psutil.cpu_times(percpu=True))) self.assertGreaterEqual(logical, 1) # if os.path.exists("/proc/cpuinfo"): with open("/proc/cpuinfo") as fd: cpuinfo_data = fd.read() if "physical id" not in cpuinfo_data: raise unittest.SkipTest("cpuinfo doesn't include physical id") physical = psutil.cpu_count(logical=False) self.assertGreaterEqual(physical, 1) self.assertGreaterEqual(logical, physical)
def _get_sysinfo(self):
    """
    Gather system info into a dictionary for sendEvent.
    This is information that rarely if ever changes.
    Well, yeah, this changed with the uptime stats.
    """
    sysinfo = {
        'hostname': os.uname()[1],
        'cpus': ps.cpu_count(logical=False),
        'cpu_cores': ps.cpu_count(),
        'architecture': pf.architecture()[0],
        'bin_format': pf.architecture()[1],
        'up_since': datetime.datetime.fromtimestamp(ps.boot_time()).strftime("%Y-%m-%d %H:%M:%S"),
        'uptime': int((datetime.datetime.now() - datetime.datetime.fromtimestamp(ps.boot_time())).total_seconds() / 60),
    }
    return sysinfo
def __init__(self, redis_address, redis_password=None):
    """Initialize the reporter object."""
    self.cpu_counts = (psutil.cpu_count(), psutil.cpu_count(logical=False))
    self.ip_addr = determine_ip_address()
    self.hostname = os.uname().nodename
    _ = psutil.cpu_percent()  # For initialization
    self.redis_key = "{}.{}".format(ray.gcs_utils.REPORTER_CHANNEL, self.hostname)
    self.redis_client = ray.services.create_redis_client(
        redis_address, password=redis_password)
    self.network_stats_hist = [(0, (0.0, 0.0))]  # time, (sent, recv)
def sessionInfo(): """ Return a dictionary with session and run information """ version = "%s" % Tools.version result = {'name': os.path.basename(sys.argv[0]), 'timestamp': time.strftime("%a %b %d %X %Y"), 'version': version, 'runInfo': [{"key": "commandline", "value": " ".join(sys.argv)}], 'uname': " / ".join(platform.uname()), 'dist': " / ".join(platform.dist()), 'mac_ver': " / ".join([platform.mac_ver()[0], platform.mac_ver()[2]]), 'python_implementation': platform.python_implementation(), 'python_version': platform.python_version(), 'metadata': { "required": { "id": "haplotypes", 'version': version, "module": "%s" % os.path.basename(sys.argv[0]), "description": "%s generated this JSON file via command line %s" % ( sys.argv[0], " ".join(sys.argv))}}, 'environment': {str(k): str(os.environ[k]) for k in os.environ.keys()}} result["python_prefix"] = sys.prefix if hasattr(sys, 'real_prefix'): result["python_virtualenv"] = True result["python_real_prefix"] = sys.real_prefix try: import psutil result["cpus"] = psutil.cpu_count() result["logical_cpus"] = psutil.cpu_count(True) result["cpu_freq"] = psutil.cpu_freq() result["memory"] = dict(psutil.virtual_memory().__dict__) except: pass try: import pip pip_packages = [] for i in pip.get_installed_distributions(local_only=True): pip_packages.append(str(i)) result["pip_packages"] = pip_packages except: pass return result
def trun(self):
    global interval
    global snap_stap
    print("Writing info in txt file(SNAPSHOT #{})".format(snap_stap))
    with open("monitorapp.txt", "a+") as txtfile:
        txtfile.write("\nSNAPSHOT " + "%s" % snap_stap + ":" + " [%s]" % datetime.datetime.now())
        txtfile.write("\nNumber of CPUs: " + str(psutil.cpu_count()))
        txtfile.write("\nNumber of Physical CPUs: " + str(psutil.cpu_count(logical=False)))
        txtfile.write("\nOverall CPU load: " + str(psutil.cpu_stats()))
        txtfile.write("\nOverall virtual memory usage: " + str(psutil.virtual_memory()))
        txtfile.write("\nOverall swap memory usage: " + str(psutil.swap_memory()))
        txtfile.write("\nIO information: " + str(psutil.disk_io_counters(perdisk=True)))
        txtfile.write("\nNetwork information: " + str(psutil.net_io_counters()))
        txtfile.close()
    snap_stap += 1
def lookup(self, *name):
    # psutil is unstable on how to get the number of cpus,
    # different versions call it differently
    cpus = None
    if hasattr(psutil, 'cpu_count'):
        cpus = psutil.cpu_count()
    elif hasattr(psutil, 'NUM_CPUS'):
        cpus = psutil.NUM_CPUS
    elif hasattr(psutil, '_psplatform'):
        for method_name in ['_get_num_cpus', 'get_num_cpus']:
            method = getattr(psutil._psplatform, method_name, None)
            if method is not None:
                cpus = method()
                break
    self.host_info['cpus'] = cpus
    if hasattr(psutil, 'phymem_usage'):
        self.host_info['memory'] = psutil.phymem_usage().total
    elif hasattr(psutil, 'virtual_memory'):
        self.host_info['memory'] = psutil.virtual_memory().total
    return self.host_info
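# A compact hedged sketch of the same compatibility dance (hypothetical helper,
# not the class above): prefer the modern API and fall back to the legacy
# constant only when it actually exists, so no AttributeError can slip through.
import psutil

def count_cpus(default=1):
    if hasattr(psutil, 'cpu_count'):                  # psutil >= 2.0
        return psutil.cpu_count() or default
    return getattr(psutil, 'NUM_CPUS', default)       # very old psutil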
def get_cpu_count(self, obj):
    return psutil.cpu_count()
logfile = 'all.log' os.makedirs(logDir, exist_ok=True) screenshotDir = logDir + 'screenshot/' os.makedirs(screenshotDir, exist_ok=True) shutdownEvent = Event() config = Config() processes = [] console = Console(ouput=not noOutput, logfile=logfile) proxyManager = ProxyManager(PROXY_FILE_LISTENER) systemStats = Stats() maxProcess = config.LISTENER_MAX_PROCESS if maxProcess < 0: maxProcess = psutil.cpu_count(logical=True) runnerStats = Array('i', 4) runnerStats[STAT_PLAYED] = 0 runnerStats[STAT_LOGGED_IN] = 0 runnerStats[STAT_ERROR] = 0 runnerStats[STAT_DRIVER_NONE] = 0 processStates = Array('i', maxProcess) messages = [] client = client('sqs') while True: try: sleep(config.LISTENER_SPAWN_INTERVAL) freeslot = maxProcess - len(processes) if freeslot:
def cpu_percent(self, interval=None): """ Rewrites original method to return two values (system and user) instead of one (overall) Return a float representing the current process CPU utilization as a percentage. When interval is 0.0 or None (default) compares process times to system CPU times elapsed since last call, returning immediately (non-blocking). That means that the first time this is called it will return a meaningful 0.0 value. When interval is > 0.0 compares process times to system CPU times elapsed before and after the interval (blocking). In this case is recommended for accuracy that this function be called with at least 0.1 seconds between calls. """ blocking = interval is not None and interval > 0.0 num_cpus = psutil.cpu_count() if psutil.POSIX: def timer(): return psutil._timer() * num_cpus else: def timer(): return sum(psutil.cpu_times()) if blocking: st1 = timer() pt1 = self._proc.cpu_times() time.sleep(interval) st2 = timer() pt2 = self._proc.cpu_times() else: st1 = self._last_sys_cpu_times pt1 = self._last_proc_cpu_times st2 = timer() pt2 = self._proc.cpu_times() if st1 is None or pt1 is None: self._last_sys_cpu_times = st2 self._last_proc_cpu_times = pt2 return 0.0, 0.0 delta_user = pt2.user - pt1.user delta_system = pt2.system - pt1.system delta_time = st2 - st1 # reset values for next call in case of interval == None self._last_sys_cpu_times = st2 self._last_proc_cpu_times = pt2 try: # The utilization split between all CPUs. # Note: a percentage > 100 is legitimate as it can result # from a process with multiple threads running on different # CPU cores, see: # http://stackoverflow.com/questions/1032357 # https://github.com/giampaolo/psutil/issues/474 user_percent = ((delta_user / delta_time) * 100) * num_cpus system_percent = ((delta_system / delta_time) * 100) * num_cpus except ZeroDivisionError: # interval was too low return 0.0, 0.0 else: return user_percent, system_percent
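# The docstring above describes blocking vs non-blocking sampling. A hedged,
# self-contained sketch of those semantics using plain psutil.Process (not the
# custom two-value method above, whose surrounding class is not shown here):
import time
import psutil

proc = psutil.Process()
proc.cpu_percent(interval=None)          # first non-blocking call primes the counters, returns 0.0
time.sleep(0.5)
print(proc.cpu_percent(interval=None))   # utilization since the previous call
print(proc.cpu_percent(interval=0.1))    # blocking call over a 0.1 s window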
class MyThread(threading.Thread):
    def run(self):
        # Define the progress bar with the iteration count and the thread name
        bar = tqdm(total=NUMBER_OF_ITERATIONS, desc=self.name)
        # Do the work and refresh the bar on each iteration
        for i in range(NUMBER_OF_ITERATIONS):
            random.randint(1, 100) * random.randint(1, 100)
            bar.update()
            time.sleep(random.randint(1, 6)/10)
        bar.close()

threads = []
# psutil call to count the number of logical cores
cpu_count = psutil.cpu_count(logical=True)
# Start one thread per logical core
for i in range(cpu_count):
    t = MyThread()
    threads.append(t)
    t.start()
# Wait for every thread in the list to finish
for t in threads:
    t.join()
print('\nThreads finished')
def _init_default():
    global APP
    if APP is None:
        APP = AsyncProcessPool()
        APP.init(psutil.cpu_count(), affinity=0, num_io_processes=1, verbose=0)
def run_resnet50_from_artifacts(ngraph_tf_src_dir, artifact_dir, batch_size, iterations): root_pwd = os.getcwd() artifact_dir = os.path.abspath(artifact_dir) ngraph_tf_src_dir = os.path.abspath(ngraph_tf_src_dir) install_ngraph_bridge(artifact_dir) patch_file = os.path.abspath( os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch")) # Now clone the repo and proceed call(['git', 'clone', 'https://github.com/tensorflow/benchmarks.git']) os.chdir('benchmarks') call(['git', 'checkout', '4c7b09ad87bbfc4b1f89650bcee40b3fc5e7dfed']) # Check to see if we need to patch the repo for Grappler import ngraph_bridge if ngraph_bridge.is_grappler_enabled(): print("Patching repo using: %s" % patch_file) apply_patch(patch_file) os.chdir('scripts/tf_cnn_benchmarks/') # junit_script = os.path.abspath('%s/test/ci/junit-wrap.sh' % root_pwd) # Update the script by adding `import ngraph_bridge` with open('convnet_builder.py', 'a') as outfile: call(['echo', 'import ngraph_bridge'], stdout=outfile) # Setup the env flags import psutil num_cores = int(psutil.cpu_count(logical=False)) print("OMP_NUM_THREADS: %s " % str(num_cores)) os.environ['OMP_NUM_THREADS'] = str(num_cores) os.environ["KMP_AFFINITY"] = 'granularity=fine,compact,1,0' # Delete the temporary model save directory model_save_dir = os.getcwd() + '/modelsavepath' if os.path.exists(model_save_dir) and os.path.isdir(model_save_dir): shutil.rmtree(model_save_dir) eval_eventlog_dir = os.getcwd() + '/eval_eventlog_dir' if os.path.exists(eval_eventlog_dir) and os.path.isdir(eval_eventlog_dir): shutil.rmtree(eval_eventlog_dir) # os.environ['JUNIT_WRAP_FILE'] = "%s/junit_training_test.xml" % build_dir # os.environ['JUNIT_WRAP_SUITE'] = 'models' # os.environ['JUNIT_WRAP_TEST'] = 'resnet50-training' # Run training job # cmd = [ # junit_script, 'python', 'tf_cnn_benchmarks.py', '--data_format', # 'NCHW', '--num_inter_threads', '1', '--train_dir=' + model_save_dir, # '--num_batches', '10', '--model=resnet50', '--batch_size=128' # ] cmd = [ 'python', 'tf_cnn_benchmarks.py', '--data_format', 'NCHW', '--num_inter_threads', '1', '--train_dir=' + model_save_dir, '--num_batches', str(iterations), '--model=resnet50', '--batch_size=' + str(batch_size), '--eval_dir=' + eval_eventlog_dir ] command_executor(cmd) # os.environ['JUNIT_WRAP_FILE'] = "%s/junit_inference_test.xml" % build_dir # os.environ['JUNIT_WRAP_SUITE'] = 'models' # os.environ['JUNIT_WRAP_TEST'] = 'resnet50-inference' # Run inference job # cmd = [ # junit_script, 'python', 'tf_cnn_benchmarks.py', '--data_format', # 'NCHW', '--num_inter_threads', '1', '--train_dir=' + model_save_dir, # '--model=resnet50', '--batch_size=128', '--num_batches', '10', '--eval' # ] cmd = [ 'python', 'tf_cnn_benchmarks.py', '--data_format', 'NCHW', '--num_inter_threads', '1', '--train_dir=' + model_save_dir, '--model=resnet50', '--batch_size=' + str(batch_size), '--num_batches', str(iterations), '--eval', '--eval_dir=' + eval_eventlog_dir ] command_executor(cmd) os.chdir(root_pwd)
wasm32 x86_64-cloudabi x86_64-linux x86_64-pc-linux-gnux32 x86_64-rdos x86_64-w64-mingw32 xgate-elf xstormy16-elf xtensa-elf z80-coff z80-elf z8k-coff ''' targets = targets.strip().split('\n') cpu_count = psutil.cpu_count() def build_and_test_target(target): try: folder = tempfile.TemporaryDirectory(prefix='/dev/shm/') os.chdir(folder.name) subprocess.check_output( '%s/configure --build=x86_64-linux --disable-gdb --disable-gdbserver --enable-obsolete --target=%s' % (sys.argv[1], target), shell=True, stderr=subprocess.DEVNULL) r = subprocess.run('make -j8', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
def __get_cpu(self):
    self.__cpu = dict()
    self.__cpu['count'] = psutil.cpu_count(logical=True)
    self.__cpu['percent'] = psutil.cpu_percent(interval=1, percpu=True)
    return self.__cpu
def test_get_cpu_count(self):
    sysinfo = self.service.get_sysinfo()
    self.assertEqual(sysinfo['num_cpus'], psutil.cpu_count())
def get_proc_info(self, proc): ''' 获取进程信息 :return: ''' try: procinfo = {} procinfo['id'] = proc.pid procinfo['name'] = proc.name() procinfo['num_threads'] = proc.num_threads() procinfo['num_handles'] = proc.num_handles() procinfo['threads'] = proc.threads() procinfo['connections'] = proc.connections() procinfo['memory_percent'] = proc.memory_percent() procinfo['memory_info'] = proc.memory_info() procinfo['cpu_affinity'] = proc.cpu_affinity() procinfo['cpu_times'] = proc.cpu_times() procinfo['p_cpu_percent'] = proc.cpu_percent(interval=self.proc_monitor_interval) procinfo['t_cpu_percent'] = psutil.cpu_percent(interval=self.proc_monitor_interval) procinfo['cpu_count_real'] = psutil.cpu_count() procinfo['cpu_count_logical'] = psutil.cpu_count(logical=False) cpu_count_real = procinfo['cpu_count_real'] cpu_count_logical = procinfo['cpu_count_logical'] p_cpu_percent = procinfo['p_cpu_percent'] t_cpu_percent = procinfo['t_cpu_percent'] return (True, p_cpu_percent, t_cpu_percent, cpu_count_real, cpu_count_logical) except Exception as e: print(e) return (False, 0, 0, 0, 0) def startup(self, exepath): """开启进程""" commands = [] try: if os.path.exists(exepath): p = psutil.Popen(commands, stdout=PIPE) except Exception as e: print(e) def termination(self, proc=None, pname=None, pid=None): '''终止进程''' try: if proc in self.all_process: proc.terminal() os.system("taskkill /PID %s", proc.pid) return True if pname: for process in self.all_process: if pname == process.name(): os.system("taskkill /PID %s", process.pid) return True if pid: for process in self.all_process: if pid == process.pid: os.system("taskkill /PID %s", pid) return True except Exception as e: print('exception failed') return False def loop_controll(self): while 1: try: # 获取配置文件中配置的所有进程 for process in self.proc_conf_list: # 是否存活 if self.is_alive_proc(proc=process): continue # 进程挂掉则拉起 time.sleep(self.getprocinfospantime) except Exception as e: print('loopControl.while :%s', e)
async def stats(self, ctx): _("""Statistics on the bot.""") async with self.bot.pool.acquire() as conn: characters = await conn.fetchval("SELECT COUNT(*) FROM profile;") items = await conn.fetchval("SELECT COUNT(*) FROM allitems;") pg_version = conn.get_server_version() pg_version = f"{pg_version.major}.{pg_version.micro} {pg_version.releaselevel}" d0 = self.bot.user.created_at d1 = datetime.datetime.now() delta = d1 - d0 myhours = delta.days * 1.5 sysinfo = distro.linux_distribution() if self.bot.owner_ids: owner = " and ".join( [str(await self.bot.get_user_global(u)) for u in self.bot.owner_ids] ) else: owner = str(await self.bot.get_user_global(self.bot.owner_id)) guild_count = sum( await self.bot.cogs["Sharding"].handler("guild_count", self.bot.shard_count) ) meminfo = psutil.virtual_memory() embed = discord.Embed( title=_("IdleRPG Statistics"), colour=0xB8BBFF, url=self.bot.BASE_URL, description=_("Official Support Server Invite: https://discord.gg/MSBatf6"), ) embed.set_thumbnail(url=self.bot.user.avatar_url) embed.set_footer( text=f"IdleRPG {self.bot.version} | By {owner}", icon_url=self.bot.user.avatar_url, ) embed.add_field( name=_("Hosting Statistics"), value=_( """\ CPU Usage: **{cpu}%**, **{cores}** cores @ **{freq}** GHz RAM Usage: **{ram}%** (Total: **{total_ram}**) Python Version **{python}** <:python:445247273065250817> discord.py Version **{dpy}** Operating System: **{osname} {osversion}** Kernel Version: **{kernel}** PostgreSQL Version **{pg_version}**""" ).format( cpu=psutil.cpu_percent(), cores=psutil.cpu_count(), freq=psutil.cpu_freq().max / 1000, ram=meminfo.percent, total_ram=humanize.naturalsize(meminfo.total), python=platform.python_version(), dpy=pkg.get_distribution("discord.py").version, osname=sysinfo[0].title(), osversion=sysinfo[1], kernel=os.uname().release, pg_version=pg_version, ), inline=False, ) embed.add_field( name=_("Bot Statistics"), value=_( """\ Code lines written: **{lines}** Shards: **{shards}** Servers: **{guild_count}** Characters: **{characters}** Items: **{items}** Average hours of work: **{hours}**""" ).format( lines=self.bot.linecount, shards=self.bot.shard_count, guild_count=guild_count, characters=characters, items=items, hours=myhours, ), inline=False, ) await ctx.send(embed=embed)
def checkProcesses(): errorOccurred = False cpu_occurrences_max = 0 memory_occurrences_max = 0 notif_thread = None cpuTimeClosedProcesses = 0 IOReadCountClosedProcesses = 0 IOReadBytesClosedProcesses = 0 IOWriteCountClosedProcesses = 0 IOWriteBytesClosedProcesses = 0 pageFaultsClosedProcesses = 0 lastProcessesInfo = dict() processes = dict() processes[mainProcess.pid] = mainProcess while not threads_exit_event.is_set( ) and not errorOccurred: # Loops while the event flag has not been set start_timestamp = time.time() update_timeTuple = (round(start_timestamp), ) #print("[" + time.strftime("%d/%m/%Y - %H:%M:%S", time.localtime(start_timestamp)) + " - " + str(start_timestamp) + "]" + "[checkProcessesThread] The event flag is not set yet, continuing operation") print("Monitoring...") cpuUsage = 0.0 processesCPUTimes = [] processesCPUAfinnity = set() #Unique values only processesIOCounters = [] processesMemoryInfo = [] totalThreads = 0 lastProcessesList = [] for lastProc, lastProcInfo in lastProcessesInfo.items(): if not lastProc.is_running(): print("[checkProcessesThread] Process with PID " + str(lastProc.pid) + " and name '" + lastProcInfo[3] + "' closed from last iteration to the current one.") #cpu_times() cpuTimeClosedProcesses += (lastProcInfo[0].user + lastProcInfo[0].system) #io_counters() IOReadCountClosedProcesses += lastProcInfo[1].read_count IOReadBytesClosedProcesses += lastProcInfo[1].read_bytes IOWriteCountClosedProcesses += lastProcInfo[1].write_count IOWriteBytesClosedProcesses += lastProcInfo[1].write_bytes #memory_full_info() pageFaultsClosedProcesses += lastProcInfo[2].num_page_faults lastProcessesList.append(lastProc) for lastProc in lastProcessesList: lastProcessesInfo.pop(lastProc) #Try catch here in case the mainProcess dies try: processesList = mainProcess.children( recursive=True) #Get all processes descendants for proc in processes.values(): if not proc.is_running(): processes.pop(proc.pid) for process in processesList: if process.pid not in processes: processes[process.pid] = process except psutil.NoSuchProcess: if not mainProcess.is_running(): print("All processes are dead!!!") errorOccurred = True continue for proc in processes.values(): #Try catch in case some process besides main process dies, this way the execution won't stop due to a secondary process try: cpuUsage += proc.cpu_percent() / psutil.cpu_count() cpuUsage = round(cpuUsage, 2) totalThreads += proc.num_threads() processesCPUAfinnity.update(proc.cpu_affinity()) procCPUTimes = proc.cpu_times() procIOCounters = proc.io_counters() procMemoryInfo = proc.memory_full_info() procName = proc.name() processesCPUTimes.append(procCPUTimes) processesIOCounters.append(procIOCounters) processesMemoryInfo.append(procMemoryInfo) lastProcessesInfo[proc] = (procCPUTimes, procIOCounters, procMemoryInfo, procName) except psutil.NoSuchProcess: if not mainProcess.is_running(): print("All processes are dead!!!") errorOccurred = True break else: print("Some processes were closed!") if errorOccurred: continue #Getting 1 record of cpu, which is the sum of the cpu fields of the processes processesNumCores = len(processesCPUAfinnity) totalUserTime = 0 totalSystemTime = 0 #totalIdleTime = 0 for CPUTimes in processesCPUTimes: totalUserTime += CPUTimes.user totalSystemTime += CPUTimes.system totalCPUTime = round(totalUserTime + totalSystemTime + cpuTimeClosedProcesses) cpuRecord = (cpuUsage, processesNumCores, totalThreads, totalCPUTime) #Getting 1 record of IO, which is the sum of the IO fields of the processes totalReadCount = 
IOReadCountClosedProcesses totalWriteCount = IOWriteCountClosedProcesses totalReadBytes = IOReadBytesClosedProcesses totalWriteBytes = IOWriteBytesClosedProcesses for IOCounter in processesIOCounters: totalReadCount += IOCounter.read_count totalWriteCount += IOCounter.write_count totalReadBytes += IOCounter.read_bytes totalWriteBytes += IOCounter.write_bytes IORecord = (totalReadCount, totalWriteCount, totalReadBytes, totalWriteBytes) totalMemoryUsage = 0 totalPageFaults = pageFaultsClosedProcesses for memoryInfo in processesMemoryInfo: totalMemoryUsage += memoryInfo.uss totalPageFaults += memoryInfo.num_page_faults systemMemoryUsage = psutil.virtual_memory().used solrMemory = solrCurrentMemory() memoryRecord = (totalMemoryUsage, totalPageFaults, systemMemoryUsage, solrMemory) # Get percentage of used disk space for key in disks: try: if key.__contains__("(disk used by Autopsy)"): totalBytes = psutil.disk_usage( key.replace("(disk used by Autopsy)", ""))[0] currentUsedBytes = psutil.disk_usage( key.replace("(disk used by Autopsy)", ""))[1] percUsed = int(currentUsedBytes * 100 / totalBytes) else: totalBytes = psutil.disk_usage(key)[0] currentUsedBytes = psutil.disk_usage(key)[1] percUsed = int(currentUsedBytes * 100 / totalBytes) disks[key].append( str(percUsed) + ", " + str(datetime.now().timestamp())) except FileNotFoundError: disks[key].append("-1" + ", " + str(datetime.now().timestamp())) #Add all the records to the database add_updates_record(cpuRecord, IORecord, memoryRecord, update_timeTuple) #Send notifications if... if cpuUsage > int(config["CPU USAGE"]["max"], 10): if cpu_occurrences_max == int( config["NOTIFICATIONS"]["cpu_usage"]): cpu_max_notif_data = retrieve_cpu_values_notif() cpuUsageGraph("miscellaneous/cpu_notif_max", cpu_max_notif_data, int(config["CPU USAGE"]["max"])) lastCpuValue = cpu_max_notif_data[-1][0] notif_thread = threading.Thread(target=send_cpu_notif, args=(smtp_password, lastCpuValue)) notif_thread.start() cpu_occurrences_max = 0 else: cpu_occurrences_max += 1 else: cpu_occurrences_max = 0 #TODO: Create IO anomaly notification and call it here if totalMemoryUsage / 1000000 > int(config["MEMORY"]["max"]): if memory_occurrences_max == int( config["NOTIFICATIONS"]["memory_usage"]): memory_max_notif_data = retrieve_memory_values_notif() memoryUsageGraph("miscellaneous/memory_notif_max", memory_max_notif_data, int(config["MEMORY"]["max"])) lastMemoryValue = int(memory_max_notif_data[-1][0]) / 1000000 notif_thread = threading.Thread(target=send_memory_notif, args=(smtp_password, lastMemoryValue)) notif_thread.start() memory_occurrences_max = 0 else: memory_occurrences_max += 1 else: memory_occurrences_max = 0 finish_timestamp = time.time() waiting_time = float(config["TIME INTERVAL"]["process"]) - ( finish_timestamp - start_timestamp) # The thread will get blocked here unless the event flag is already set, and will break if it set at any time during the timeout if waiting_time > 0: threads_exit_event.wait(timeout=waiting_time) #if not errorOccurred: # print("[checkProcessesThread] Event flag has been set") print("Updating current job in the database") update_jobs_record() if notif_thread is not None: notif_thread.join() print("Powering off")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--sequence', type=str, default='HHHHH',
                        help='Chain sequence. Default: HHHHH')
    parser.add_argument('--grid_size', type=int, default=50,
                        help='Length of the grid, default is 1 + length')
    parser.add_argument('--cores', type=int, default=psutil.cpu_count(),
                        help='Number of cores. Default is {}'.format(psutil.cpu_count()))
    parser.add_argument('--draw', type=bool, default=False,
                        help='Write conf to png. Default is false.')
    parser.add_argument('--verbose', type=bool, default=False,
                        help='Print progress to stdout.')
    parser.add_argument('--conf_directory', type=str, default='examples',
                        help='Load configurations from here. Default = ./examples')
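# Hedged aside (not part of the script above): argparse's type=bool does not
# parse the string "False" as False, so boolean flags are usually safer as
# store_true actions. A minimal self-contained sketch of that alternative:
import argparse
import psutil

parser = argparse.ArgumentParser()
parser.add_argument('--cores', type=int, default=psutil.cpu_count() or 1,
                    help='Number of worker cores.')
parser.add_argument('--draw', action='store_true', help='Write conf to png.')
parser.add_argument('--verbose', action='store_true', help='Print progress to stdout.')
args = parser.parse_args(['--cores', '4', '--verbose'])
print(args.cores, args.draw, args.verbose)  # 4 False True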
def CreateFrames(path, runName, t0, t1, hemisphere, configFile, ncpus): assert ((hemisphere == 'north') | (hemisphere == 'south')) hemiSelect = {'north': 'North', 'south': 'South'}[hemisphere] # Make sure the output directory exisits if not make it dirname = os.path.join(path, 'figs', hemisphere) if not os.path.exists(dirname): os.makedirs(dirname) print(('Rendering ' + hemiSelect + 'ern hemisphere, storing frames at ' + dirname)) #Now check to make sure the files are correct data = pyLTR.Models.MIX(path, runName) modelVars = data.getVarNames() for v in [ 'Grid X', 'Grid Y', 'Potential North [V]', 'Potential South [V]', 'FAC North [A/m^2]', 'FAC South [A/m^2]', 'Pedersen conductance North [S]', 'Pedersen conductance South [S]', 'Hall conductance North [S]', 'Hall conductance South [S]', 'Average energy North [keV]', 'Average energy South [keV]', 'Number flux North [1/cm^2 s]', 'Number flux South [1/cm^2 s]', 'Electron energy flux North [KeV/cm^2 s]', 'Electron energy flux South [KeV/cm^2 s]' ]: assert (v in modelVars) timeRange = data.getTimeRange() if len(timeRange) == 0: raise Exception(( 'No data files found. Are you pointing to the correct run directory?' )) index0 = 0 if t0: for i, t in enumerate(timeRange): if t0 >= t: index0 = i index1 = len(timeRange) if t1: for i, t in enumerate(timeRange): if t1 >= t: index1 = i print(('Extracting MIX quantities for time series over %d time steps.' % (index1 - index0))) # Output a status bar displaying how far along the computation is. #progress = pyLTR.StatusBar(0, index1-index0) #progress.start() # Pre-compute r and theta x = data.read('Grid X', timeRange[index0]) y = data.read('Grid Y', timeRange[index0]) theta = n.arctan2(y, x) theta[theta < 0] = theta[theta < 0] + 2 * n.pi # plotting routines now rotate local noon to point up #theta=theta+n.pi/2 # to put noon up r = n.sqrt(x**2 + y**2) # plotting routines now expect longitude and colatitude, in radians, stored in dictionaries longitude = {'data': theta, 'name': r'\phi', 'units': r'rad'} colatitude = {'data': n.arcsin(r), 'name': r'\theta', 'units': r'rad'} # Deal with the plot options if (configFile == None): #potOpts={'min':-100.,'max':100.,'colormap':'bwr'} #'RdBu_r'} #facOpts={'min':-1.,'max':1.,'colormap':'bwr'} #'RdBu_r'} rengOpts = {'min': 0., 'max': 10.} rflxOpts = { 'min': 0., 'max': 1.0e9, 'format_str': '%.1e', 'colormap': 'jet' } reflxOpts = { 'min': 0., 'max': 1.0e10, 'format_str': '%.1e', 'colormap': 'jet' } pedOpts = {'min': 1., 'max': 10., 'colormap': 'plasma'} halOpts = {'min': 1., 'max': 10., 'colormap': 'plasma'} engOpts = {'min': 0., 'max': 20.} flxOpts = { 'min': 0., 'max': 1.0e9, 'format_str': '%.1e', 'colormap': 'jet' } eflxOpts = { 'min': 0., 'max': 1.0e10, 'format_str': '%.1e', 'colormap': 'jet' } optsObject = { 'reng': rengOpts, 'rflx': rflxOpts, 'reflx': reflxOpts, 'ped': pedOpts, 'hall': halOpts, 'energy': engOpts, 'flux': flxOpts, 'eflux': eflxOpts } configFilename = os.path.join(dirname, 'IonSum.config') print(("Writing plot config file at " + configFilename)) f = open(configFilename, 'w') f.write(pyLTR.yaml.safe_dump(optsObject, default_flow_style=False)) f.close() else: f = open(configFile, 'r') optsDict = pyLTR.yaml.safe_load(f.read()) f.close() if ('pot' in optsDict): potOpts = optsDict['pot'] else: potOpts = {'min': -100., 'max': 100., 'colormap': 'RdBu_r'} if ('fac' in optsDict): facOpts = optsDict['fac'] else: facOpts = {'min': -1., 'max': 1., 'colormap': 'RdBu_r'} if ('ped' in optsDict): pedOpts = optsDict['ped'] else: pedOpts = {'min': 1., 'max': 8.} if 
('hall' in optsDict): halOpts = optsDict['hall'] else: halOpts = {'min': 1., 'max': 8.} if ('energy' in optsDict): engOpts = optsDict['energy'] else: engOpts = {'min': 0., 'max': 20.} if ('flux' in optsDict): flxOpts = optsDict['flux'] else: flxOpts = {'min': 0., 'max': 1.0e9, 'format_str': '%.1e'} args = ((i, time, hemiSelect, hemisphere, data, rengOpts, rflxOpts, reflxOpts, pedOpts, halOpts, engOpts, flxOpts, eflxOpts, longitude, colatitude, path) for i, time in enumerate(timeRange[index0:index1])) #pl=Pool(processes=8) #for i,time in enumerate(timeRange[index0:index1]): # #print(i,time) # pl.apply_async(PlotStuff,args=(i,time,hemiSelect,hemisphere,data,potOpts,facOpts,pedOpts,halOpts,engOpts,flxOpts,longitude,colatitude,path)) #pl=Pool() #pl.starmap(PlotStuff,args) #pl.close() #pl.join() print('This system has ', cpu_count(logical=False), ' cpus.') ncpus = min(int(ncpus), cpu_count(logical=False)) print('We will use ', ncpus, ' cpus for parallelization') with Pool(processes=ncpus) as pl: pl.starmap(PlotStuff, args) return os.path.join(path, 'figs', hemisphere)
class SystemInfo(object): def get_size(bytes, suffix="B"): """ Scale bytes to its proper format e.g: 1253656 => '1.20MB' 1253656678 => '1.17GB' """ factor = 1024 for unit in ["", "K", "M", "G", "T", "P"]: if bytes < factor: return f"{bytes:.2f}{unit}{suffix}" bytes /= factor #System Information print("=" * 40, "System Information", "=" * 40) uname = platform.uname() print(f"System: {uname.system}") print(f"Node Name: {uname.node}") print(f"Release: {uname.release}") print(f"Version: {uname.version}") print(f"Machine: {uname.machine}") print(f"Processor: {uname.processor}") # Boot Time print("=" * 40, "Boot Time", "=" * 40) boot_time_timestamp = psutil.boot_time() bt = datetime.fromtimestamp(boot_time_timestamp) print( f"Boot Time: {bt.year}/{bt.month}/{bt.day} {bt.hour}:{bt.minute}:{bt.second}" ) ## CPU Information # let's print CPU information print("=" * 40, "CPU Info", "=" * 40) # number of cores print("Physical cores:", psutil.cpu_count(logical=False)) print("Total cores:", psutil.cpu_count(logical=True)) # CPU frequencies cpufreq = psutil.cpu_freq() print(f"Max Frequency: {cpufreq.max:.2f}Mhz") print(f"Min Frequency: {cpufreq.min:.2f}Mhz") print(f"Current Frequency: {cpufreq.current:.2f}Mhz") # CPU usage print("CPU Usage Per Core:") for i, percentage in enumerate(psutil.cpu_percent(percpu=True, interval=1)): print(f"Core {i}: {percentage}%") print(f"Total CPU Usage: {psutil.cpu_percent()}%") ## Memory Usage # Memory Information print("=" * 40, "Memory Information", "=" * 40) # get the memory details svmem = psutil.virtual_memory() print(f"Total: {get_size(svmem.total)}") print(f"Available: {get_size(svmem.available)}") print(f"Used: {get_size(svmem.used)}") print(f"Percentage: {svmem.percent}%") print("=" * 20, "SWAP", "=" * 20) # get the swap memory details (if exists) swap = psutil.swap_memory() print(f"Total: {get_size(swap.total)}") print(f"Free: {get_size(swap.free)}") print(f"Used: {get_size(swap.used)}") print(f"Percentage: {swap.percent}%") # Disk Information print("=" * 40, "Disk Information", "=" * 40) print("Partitions and Usage:") # get all disk partitions partitions = psutil.disk_partitions() for partition in partitions: print(f"=== Device: {partition.device} ===") print(f" Mountpoint: {partition.mountpoint}") print(f" File system type: {partition.fstype}") try: partition_usage = psutil.disk_usage(partition.mountpoint) except PermissionError: # this can be catched due to the disk that # isn't ready continue print(f" Total Size: {get_size(partition_usage.total)}") print(f" Used: {get_size(partition_usage.used)}") print(f" Free: {get_size(partition_usage.free)}") print(f" Percentage: {partition_usage.percent}%") # get IO statistics since boot disk_io = psutil.disk_io_counters() print(f"Total read: {get_size(disk_io.read_bytes)}") print(f"Total write: {get_size(disk_io.write_bytes)}") # Network information print("=" * 40, "Network Information", "=" * 40) # get all network interfaces (virtual and physical) if_addrs = psutil.net_if_addrs() for interface_name, interface_addresses in if_addrs.items(): for address in interface_addresses: print(f"=== Interface: {interface_name} ===") if str(address.family) == 'AddressFamily.AF_INET': print(f" IP Address: {address.address}") print(f" Netmask: {address.netmask}") print(f" Broadcast IP: {address.broadcast}") elif str(address.family) == 'AddressFamily.AF_PACKET': print(f" MAC Address: {address.address}") print(f" Netmask: {address.netmask}") print(f" Broadcast MAC: {address.broadcast}") # get IO statistics since boot net_io = 
psutil.net_io_counters() print(f"Total Bytes Sent: {get_size(net_io.bytes_sent)}") print(f"Total Bytes Received: {get_size(net_io.bytes_recv)}") system_df = pd.DataFrame( index=[uname.system, uname.node, uname.machine, uname.processor]) print(system_df.head())
class ResourceMonitor(threading.Thread): load_average_scan_minutes = 15 cores = psutil.cpu_count() cpu_perc = [] vmem_perc = [] lock = threading.Lock() def __init__(self, load_average_scan_minutes): threading.Thread.__init__(self) ResourceMonitor.load_average_scan_minutes = load_average_scan_minutes ResourceMonitor.lock.acquire() ResourceMonitor.vmem_perc.append(psutil.virtual_memory().percent) ResourceMonitor.vmem_perc.append(psutil.virtual_memory().percent) ResourceMonitor.cpu_perc.append( psutil.cpu_percent(interval=0, percpu=False)) ResourceMonitor.cpu_perc.append( psutil.cpu_percent(interval=0, percpu=False)) ResourceMonitor.lock.release() def proc_is_running(self, proc_defs): for proc in psutil.process_iter(): try: process = psutil.Process( proc.pid) # Get the process info using PID if process.is_running(): pid = str(process.pid) ppid = str(process.ppid) status = process.status() cpu_percent = process.cpu_percent() mem_percent = process.memory_percent() rss = str(process.memory_info().rss) vms = str(process.memory_info().vms) username = process.username() name = process.name() # Here is the process name path = process.cwd() cmdline = ' '.join(process.cmdline()) print( "Get the process info using (path, name, cmdline): [%s / %s / %s]" % (path, name, cmdline)) for _p in proc_defs: # logger.info("Look for process: [%s] / Status [%s]" % (_p, status.lower())) # print("Look for process: [%s] / Status [%s]" % (_p, status.lower())) if (status.lower() != "sleeping") and \ ('name' in _p and _p['name'] in name) and \ ('cwd' in _p and _p['cwd'] in path) and \ ('cmdline' in _p and _p['cmdline'] in cmdline): return True except: import traceback tb = traceback.format_exc() logger.debug(tb) print(tb) return False def run(self): while True: ResourceMonitor.lock.acquire() ResourceMonitor.vmem_perc[1] = (ResourceMonitor.vmem_perc[0] + ResourceMonitor.vmem_perc[1]) / 2.0 ResourceMonitor.vmem_perc[0] = ( ResourceMonitor.vmem_perc[1] + psutil.virtual_memory().percent) / 2.0 ResourceMonitor.cpu_perc[1] = ResourceMonitor.cpu_perc[0] ResourceMonitor.cpu_perc[0] = psutil.cpu_percent( interval=(ResourceMonitor.load_average_scan_minutes * 60), percpu=False) ResourceMonitor.lock.release()
# pre-calculate inclination-independent quantities st = star.Star(omega[o], L[l], M[m], 1, ut.D10, 100, ld=limb) for i in np.arange(len(inc)): # calculate the magnitudes at this inclination; # these should be a 1D array, corresponding to bands and reddenings as follows: # (b0, av0), (b0, av1), ..., (b0, avm), (b1, av0), ..., (bn, avm) mags = st.integrate(inc[i]) result[o, i, m, :, :] = mags.reshape(len(bands), len(av)) except mp.InterpolationError as err: pass print('.', end='', flush=True) return result sockets = 2 # number of chips on the machine num_cpu = sockets * psutil.cpu_count( logical=False) # number of cores in each parallelized run # limb darkening # 0: Z # 1: A_V with open(iodir + 'data/ldlist.pkl', 'rb') as fl: ldlist = pickle.load(fl) # bands bands = ldlist[0].bands ## model parameters # metallicities and reddening coefficients Z = [] for iz in range(len(ldlist)): l = ldlist[iz]
def init(self, ncpu=None, affinity=None, parent_affinity=0, num_io_processes=1, verbose=0, pause_on_start=False): """ Initializes an APP. Can be called multiple times at program startup Args: ncpu: affinity: parent_affinity: num_io_processes: verbose: Returns: """ self.affinity = affinity self.verbose = verbose self.pause_on_start = pause_on_start if isinstance(self.affinity, int): self.cpustep = abs(self.affinity) or 1 maxcpu = psutil.cpu_count() // self.cpustep self.ncpu = ncpu or maxcpu self.parent_affinity = parent_affinity elif isinstance(self.affinity, list): if any(map(lambda x: x < 0, self.affinity)): raise RuntimeError( "Affinities must be list of positive numbers") if psutil.cpu_count() < max(self.affinity): raise RuntimeError( "There are %d virtual threads on this system. Some elements of the affinity map are " "higher than this. Check parset." % psutil.cpu_count()) self.ncpu = ncpu or len(self.affinity) if self.ncpu != len(self.affinity): print(ModColor.Str( "Warning: NCPU does not match affinity list length. Falling back to " "NCPU=%d" % len(self.affinity)), file=log) self.ncpu = self.ncpu if self.ncpu == len(self.affinity) else len( self.affinity) maxcpu = max(self.affinity) + 1 # zero indexed list self.parent_affinity = parent_affinity elif isinstance(self.affinity, str) and str( self.affinity) == "enable_ht": self.affinity = 1 self.cpustep = 1 maxcpu = psutil.cpu_count() // self.cpustep self.ncpu = ncpu or maxcpu self.parent_affinity = parent_affinity elif isinstance(self.affinity, str) and str( self.affinity) == "disable_ht": # this works on Ubuntu so possibly Debian-like systems, no guarantees for the rest # the mapping on hyperthread-enabled NUMA machines with multiple processors can get very tricky # /sys/devices/system/cpu/cpu*/topology/thread_siblings_list should give us a list of siblings # whereas core_siblings_list will give the list of sibling threads on the same physical processor # for now lets not worry about assigning different jobs to physical cpus but for now keep things # simple hyperthread_sibling_lists = map( lambda x: x + "/topology/thread_siblings_list", filter(lambda x: re.match(r"cpu[0-9]+", os.path.basename(x)), glob.glob("/sys/devices/system/cpu/cpu*"))) left_set = set([]) right_set = set([]) for siblings_file in hyperthread_sibling_lists: with open(siblings_file) as f: siblings = map(int, f.readline().split(",")) if len(siblings) == 2: l, r = siblings # cannot be sure the indices don't swap around at some point # since there are two copies of the siblings we need to # check that the items are not in the other sets before adding # them to the left and right sets respectively if l not in right_set: left_set.add(l) if r not in left_set: right_set.add(r) elif len(siblings) == 1: left_set.add(siblings[0]) else: raise RuntimeError( "Don't know how to handle this architecture. It seems there are more than " "two threads per core? Try setting things manually by specifying a list " "to the affinity option") self.affinity = list(left_set) # only consider 1 thread per core self.ncpu = ncpu or len(self.affinity) if self.ncpu > len(self.affinity): print(ModColor.Str( "Warning: NCPU is more than the number of physical cores on " "the system. I will only use %d cores." 
% len(self.affinity)), file=log) self.ncpu = self.ncpu if self.ncpu <= len(self.affinity) else len( self.affinity) maxcpu = max(self.affinity) + 1 # zero indexed list unused = [ x for x in range(psutil.cpu_count()) if x not in self.affinity ] if len(unused) == 0: print(ModColor.Str( "No unassigned vthreads to use as parent IO thread, I will use thread 0" ), file=log) self.parent_affinity = 0 # none unused (HT is probably disabled BIOS level) else: self.parent_affinity = unused[ 0] # grab the first unused vthread elif isinstance(self.affinity, str) and str( self.affinity) == "disable": self.affinity = None self.parent_affinity = None self.cpustep = 1 maxcpu = psutil.cpu_count() self.ncpu = ncpu or maxcpu else: raise RuntimeError( "Invalid option for Parallel.Affinity. Expected cpu step (int), list, " "'enable_ht', 'disable_ht', 'disable'") if self.parent_affinity is None: print("Parent and I/O affinities not specified, leaving unset", file=log) else: print(ModColor.Str("Fixing parent process to vthread %d" % self.parent_affinity, col="green"), file=log) psutil.Process().cpu_affinity( range(self.ncpu ) if not self.parent_affinity else [self.parent_affinity]) # if NCPU is 0, set to number of CPUs on system if not self.ncpu: self.ncpu = maxcpu elif self.ncpu > maxcpu: print(ModColor.Str( "NCPU=%d is too high for this setup (%d cores, affinity %s)" % (self.ncpu, psutil.cpu_count(), str(self.affinity) if isinstance( self.affinity, int) else ",".join(map(str, self.affinity)) if isinstance(self.affinity, list) else "disabled")), file=log) print(ModColor.Str("Falling back to NCPU=%d" % (maxcpu)), file=log) self.ncpu = maxcpu self.procinfo = psutil.Process( ) # this will be used to control CPU affinity # create a queue for compute-bound tasks # generate list of CPU cores for workers to run on if isinstance(self.affinity, int) and (not self.affinity or self.affinity == 1): cores = range(self.ncpu) elif isinstance(self.affinity, int) and self.affinity == 2: cores = range(0, self.ncpu * 2, 2) elif isinstance(self.affinity, int) and self.affinity == -2: cores = range(0, self.ncpu * 2, 4) + range(1, self.ncpu * 2, 4) elif isinstance(self.affinity, list): cores = self.affinity[:self.ncpu] elif not self.affinity: cores = range(self.ncpu) else: raise ValueError("unknown affinity setting") if not self.affinity: print("Worker affinities not specified, leaving unset", file=log) else: print(ModColor.Str("Worker processes fixed to vthreads %s" % (','.join([str(x) for x in cores])), col="green"), file=log) self._compute_workers = [] self._io_workers = [] self._compute_queue = multiprocessing.Queue() self._io_queues = [ multiprocessing.Queue() for x in range(num_io_processes) ] self._result_queue = multiprocessing.Queue() self._termination_event = multiprocessing.Event() # this event is set when all workers have been started, an cleared when a restart is requested self._workers_started_event = multiprocessing.Event() self._cores = cores # create a Taras Bulba process. http://www.imdb.com/title/tt0056556/quotes # This is responsible for spawning, killing, and respawning workers self._taras_restart_event = multiprocessing.Event() self._taras_exit_event = multiprocessing.Event() if self.ncpu > 1: self._taras_bulba = multiprocessing.Process( target=AsyncProcessPool._startBulba, name="TB", args=(self, )) if pause_on_start: print(ModColor.Str( "Please note that due to your debug settings, worker processes will be paused on startup. 
Send SIGCONT to all processes to continue.", col="blue"), file=log) else: self._taras_bulba = None self._started = False
def cpuInit(self): print('cpuInit') #drawing area for cpu self.cpuDrawArea=self.builder.get_object('cpudrawarea') self.cpuUtilArray=[0]*100 #cpu util array self.cpu_logical_cores=ps.cpu_count() self.cpu_logical_cores_util_arrays=[] temp=ps.cpu_percent(percpu=True) for i in range(self.cpu_logical_cores): self.cpu_logical_cores_util_arrays.append([0]*99) self.cpu_logical_cores_util_arrays[i].append(temp[i]) self.logical_cpu_grid=self.builder.get_object('logical_grid_area') ## cpu draw tab labels self.cpuInfoLabel=self.builder.get_object('cpuinfolabel') ## cpu utilisation label self.cpuUtilLabelValue=self.builder.get_object('cpuutilisation') # cpu speed self.cpuSpeedLabelValue=self.builder.get_object('cpuspeed') # processes self.cpuProcessesLabelValue=self.builder.get_object('cpuprocesses') self.cpuThreadsLabelValue=self.builder.get_object('cputhreads') ## other cpu info self.cpuCoreLabelValue=self.builder.get_object('cpucoreslablevalue') self.cpuLogicalLabelValue=self.builder.get_object('cpulogicallabelvalue') self.cpuVirtualisationLabelValue=self.builder.get_object('cpuvirtualisationlabelvalue') self.cpuL1LabelValue=self.builder.get_object('cpul1labelvalue') self.cpuL2LabelValue=self.builder.get_object('cpul2labelvalue') self.cpuL3LabelValue=self.builder.get_object('cpul3labelvalue') self.cpuTempLabelValue=self.builder.get_object('cputemplabelvalue') self.cpuFanSpeedLabelValue=self.builder.get_object('cpufanspeedlabelvalue') self.cpuMxSpeedLabelValue=self.builder.get_object('cpumxspeedlabelvalue') try: ## for the first time only to get the name of the cpu p=popen('cat /proc/cpuinfo |grep -m1 "model name"') self.cpuname=p.read().split(':')[1].split('\n')[0] #print(self.cpuname) # cpu name self.cpuInfoLabel.set_text(self.cpuname) self.cpuInfoLabel.set_valign(g.Align.CENTER) p.close() except: print("Failed to get model information") self.cpuCoreLabelValue.set_text(str(ps.cpu_count(logical=False))) self.cpuLogicalLabelValue.set_text(str(self.cpu_logical_cores)) try: p=popen('lscpu|grep -i -E "(vt-x)|(amd-v)"') temp=p.read() if temp: temptext="Enabled" else: temptext="Disabled" self.cpuVirtualisationLabelValue.set_text(temptext) p.close() except: print("Failed to get Virtualisation information") try: p=popen('lscpu|grep -i -m1 "L1d cache"') self.cpuL1LabelValue.set_text(sub("[\s]","",p.read().split(':')[1])) p.close() p=popen('lscpu|grep -i -m1 "L2 cache"') self.cpuL2LabelValue.set_text(sub('[\s]','',p.read().split(':')[1])) p.close() p=popen('lscpu|grep -i "L3 cache"') self.cpuL3LabelValue.set_text(sub('[\s]','',p.read().split(':')[1])) p.close() except: print("Failed to get Cache information") self.speed=ps.cpu_freq() self.cpuMxSpeedLabelValue.set_text('{:.2f} GHz'.format(self.speed[2]/1000)) self.num_of_column_per_row={ 1:1, 2:2, 3:3, 4:2, 5:3, 6:3, 7:4, 8:4, 9:3, 10:5, 11:4, 12:4, 13:5, 14:5, 15:5, 16:4, 17:5, 18:5, 19:5, 20:5, 21:6, 22:6, 23:6, 24:6, 25:7, 26:7, 27:7, 28:7, 29:8, 30:8, 31:8, 32:8 } ## logical self.cpu_logical_cores_draw_areas=[] row,column=0,0 for cpu_index in range(self.cpu_logical_cores): draw_area=g.DrawingArea() draw_area.set_name(str(cpu_index)) self.cpu_logical_cores_draw_areas.append(draw_area) # draw_area=g.Button(label="begin{0}".format(cpu_index)) if column < self.num_of_column_per_row[self.cpu_logical_cores]: self.logical_cpu_grid.attach(draw_area,column,row,1,1) column+=1 else: column=0 row+=1 self.logical_cpu_grid.attach(draw_area,column,row,1,1) column+=1 draw_area.connect('draw',self.on_cpu_logical_drawing) self.logical_cpu_grid.show_all()