def execute(self, quals, columns):
    if self.table_type == "processes":
        for proc in psutil.process_iter():
            try:
                pinfo = proc.as_dict(attrs=list(self.columns.keys()))
            except psutil.NoSuchProcess:
                pass
            else:
                yield pinfo
    if self.table_type == "listening_ports":
        conns = []
        for proc in psutil.process_iter():
            try:
                for conn in proc.connections(kind="inet"):
                    conns.append((proc.pid, conn))
            except psutil.AccessDenied:
                pass
        for pid, conn in conns:
            if conn.status == "LISTEN":
                yield {
                    "pid": pid,
                    "address": conn.laddr[0],
                    "port": conn.laddr[1],
                }
def collectProcesses(configobj): process = [] if sys.platform == "linux" or sys.platform == "linux2": processlist = [] for pid in getPids(): proc = procStats(str(pid)) if proc != False: process.append(proc) elif sys.platform == "darwin": for proc in psutil.process_iter(): try: pinfo = proc.as_dict(attrs=['pid', 'name', 'ppid', 'exe', 'cmdline', 'username', 'cpu_percent','memory_percent']) except psutil.NoSuchProcess: pass else: if ''.join(pinfo['cmdline']) != '': process.append(pinfo) elif sys.platform == "win32": for proc in psutil.process_iter(): try: pinfo = proc.as_dict(attrs=['pid', 'name', 'ppid', 'exe', 'cmdline', 'username', 'cpu_percent','memory_percent']) except psutil.NoSuchProcess: pass else: process.append(pinfo) return process
def get_process_list():
    process_list = list()
    for p in psutil.process_iter():
        try:
            process_list.append(
                [
                    p.pid,
                    p.name(),
                    p.status(),
                    p.username(),
                    # p.io_counters(),
                    # p.nice(),
                    # p.children(recursive=True),
                    # p.connections(),
                ]
            )
        except psutil.ZombieProcess:
            continue
    for proc in psutil.process_iter():
        if proc.name() == 'soffice.bin':
            print('found it!!!!')
            try:
                proc.kill()
                print('killed soffice.bin')
            except psutil.Error:
                print('could not kill')
                continue
    return process_list
def KillTest(pid=None, exe=None):
    """Kill off a running test, specified either by pid or by name, or both.

    First, recursively kill pid and all its children. Then check all running
    processes to see if they are instances of exe, and recursively kill them
    if so.

    Requires 'easy_install psutil' to function.
    """
    # Recursive kill of pid and its children, killing innermost first
    if pid:
        try:
            for proc in psutil.process_iter():
                if get_prop(proc, 'ppid') == pid:
                    KillTest(proc.pid)
            print("Killing", pid)
            os.kill(pid, signal.SIGKILL)
        except (psutil.NoSuchProcess, OSError):
            pass
    # Kill by name - figure out pid and call ourselves with that
    if exe:
        # print("Killing", exe)
        for proc in psutil.process_iter():
            try:
                if get_prop(proc, 'exe') == exe:
                    KillTest(proc.pid)
            except (psutil.NoSuchProcess, psutil.AccessDenied, OSError):
                pass
def popups_poller_cb(self): # build an orderd list of all running procs (pid, name, cpu_perc, mun_t) if psutil.version_info[0] < 2: self.top_procs = [ (p.pid, p.name, p.get_cpu_percent(interval=0) / self.num_cores, p.get_num_threads()) for p in psutil.process_iter() ] else: self.top_procs = [ (p.pid, p.name(), p.cpu_percent(interval=0) / self.num_cores, p.num_threads()) for p in psutil.process_iter() ] self.top_procs.sort(key=itemgetter(2), reverse=True) # update all the visible genlists for popup in self._popups: li = popup.data["list"] # adjust the size (items count) of the genlist items_count = li.items_count() procs_count = len(self.top_procs) if procs_count > items_count: for idx in range(items_count, procs_count): li.item_append(self.popups_itc, idx) elif procs_count < items_count: for idx in range(procs_count, items_count): li.last_item.delete() # update visible list items and the header text li.realized_items_update() popup.data["head"].text = "{} Running processes".format(procs_count) return ecore.ECORE_CALLBACK_RENEW
def pids(check=None, name=None):
    # psutil currently supports Linux, Windows, OSX, FreeBSD and Sun Solaris,
    # both 32-bit and 64-bit architectures, with Python versions from 2.6 to 3.5
    # (users of Python 2.4 and 2.5 may use the 2.1.3 version).
    from psutil import process_iter
    if check == "wholelist":
        print("")
        for p in process_iter():
            try:
                print('\tPID: {0} \t\t {1}'.format(p.pid, p.name()))
            except Exception:
                pass
        print("")
    else:
        cache = None
        print("")
        for p in process_iter():
            try:
                if name.lower() in p.name().lower():
                    cache = True
                    print('\tPID: {0} \t\t {1}'.format(p.pid, p.name()))
            except Exception:
                pass
        print("")
        if cache is None:
            print("No PID found for: {0}".format(name))
def _get_processes(self): processes = [] # Ask CPU counters try: for p in psutil.process_iter(): if psutil.version_info[:2] >= (2, 0): p.as_dict(['cpu_percent']) else: p.as_dict(['get_cpu_percent']) except: pass # Wait to get correct cpu data sleep(2) for p in psutil.process_iter(): try: if psutil.version_info[:2] >= (2, 0): p.dict = p.as_dict(['username', 'nice', 'memory_info', 'memory_percent', 'cpu_percent', 'name', 'status']) else: p.dict = p.as_dict(['username', 'nice', 'get_memory_info', 'get_memory_percent', 'get_cpu_percent', 'name', 'status']) processes.append(p) except: pass return self._process_parser(processes)
def _hack_check_for_duplicates(self): # TEMPORARY HACK: Look for multiple instance of the # mycroft-speech-client and/or mycroft-skills services, which could # happen when upgrading a shipping Mark 1 from release 0.8.17 or # before. When found, force the unit to reboot. import psutil LOG.info("Hack to check for duplicate service instances") count_instances = 0 needs_reboot = False for process in psutil.process_iter(): if process.cmdline() == ['python2.7', '/usr/local/bin/mycroft-speech-client']: count_instances += 1 if (count_instances > 1): LOG.info("Duplicate mycroft-speech-client found") needs_reboot = True count_instances = 0 for process in psutil.process_iter(): if process.cmdline() == ['python2.7', '/usr/local/bin/mycroft-skills']: count_instances += 1 if (count_instances > 1): LOG.info("Duplicate mycroft-skills found") needs_reboot = True if needs_reboot: LOG.info("Hack reboot...") self.reader.process("unit.reboot") self.ws.emit(Message("enclosure.eyes.spin")) self.ws.emit(Message("enclosure.mouth.reset"))
def _get_proc_obj(self): """ This function returns the process' corresponding object. :returns: ```obj``` The process object that corresponds to the name. :returns: ```list of obj``` A list of process objects that correspond to the name. """ filters = self.config.get("filters") proc_list = list() if filters: for proc in psutil.process_iter(): try: for f in filters: if f in proc.name(): proc_list.append(proc) break except psutil.NoSuchProcess: logger.exception("Error occurred obtaining process.") pass else: for proc in psutil.process_iter(): try: proc_list.append(proc) except psutil.NoSuchProcess: logger.exception("Error occurred obtaining process.") pass return proc_list
def test_watcher_does_not_leak(self):
    try:
        self.watcher = DeviceWatcher(portToWatch=5555, callback=self.assert_watcher_receive_correct_data)
        self.watcher.start()
        assert self.watcher.isRunning() == True
        self.watcher.stop()

        procName = self.watcher.getProcName()
        isReallyRunning = False
        for proc in psutil.process_iter():
            if proc.name() == self.watcher.getProcName():
                assert proc.status() == psutil.STATUS_RUNNING
                isReallyRunning = True
        assert isReallyRunning

        self.watcher.stop()
        assert self.watcher.isRunning() == False

        isReallyClosed = True
        for proc in psutil.process_iter():
            if proc.name() == procName:
                isReallyClosed = False
        assert isReallyClosed
    except Exception as e:
        print(e)
        # May not work on all platforms
        # TODO: Make it work on MAC
        self.watcher.stop()
        pass
def process_finder(self, process=None, search_string=None):
    """
    Create a set of pids of selected processes.

    Matches against a list of search strings and/or a list of process names.
    """
    found_process_list = []
    if search_string:
        for proc in psutil.process_iter():
            found = False
            for string in search_string:
                if not found:
                    if string in ' '.join(proc.cmdline()):
                        found = True
            if found:
                found_process_list.append(proc.pid)
    if process:
        for proc in psutil.process_iter():
            for current_proc in process:
                if current_proc == 'All':
                    found_process_list.append(proc.pid)
                if proc.name() == current_proc:
                    found_process_list.append(proc.pid)
    return set(found_process_list)
def get_pids(s):
    # returns a list of the worker PID(s) from a daemon process
    process_name = s
    parent_pid = 0
    pid_list = []
    parent_found = False
    # find parent PID
    for p in psutil.process_iter():
        if p.ppid() == 1 and (process_name in p.name()):
            parent_pid = p.pid
            parent_found = True
            break
    if not parent_found:
        print('daemon ' + s + ' not found')
    # find children of parent PID
    else:
        for p in psutil.process_iter():
            if p.ppid() == parent_pid:
                pid_list.append(p.pid)
        if len(pid_list) == 0:
            print('daemon ' + s + ' has no worker processes')
    return pid_list
def maintain():
    cmdline = 'run_worker.py'
    cnt = 0
    for proc in psutil.process_iter():
        if proc.username() == 'cx28':
            for part in proc.cmdline():
                if cmdline in part:
                    cnt += 1
    if cnt == 1:
        print('good')
        return
    print('need to kill')
    for proc in psutil.process_iter():
        if proc.username() == 'cx28':
            for part in proc.cmdline():
                if cmdline in part:
                    if cnt > 1:
                        cnt -= 1
                        try:
                            proc.kill()
                        except Exception as e:
                            print(e)
def com_process_list(process_name=None):
    """ Get processes and optionally check for one by name """
    if process_name is not None:
        return process_name in (p.name() for p in psutil.process_iter())
    else:
        return psutil.process_iter()
def protoandport(): protport = {} if sys.platform.startswith('win'): for process in psutil.process_iter(): if 'tvnserver.exe' in process.name(): process_handler = psutil.Process(process.pid) for cux in process_handler.connections(): if cux.status == psutil.CONN_LISTEN: protport['vnc'] = cux.laddr.port elif 'sshd.exe' in process.name(): process_handler = psutil.Process(process.pid) for cux in process_handler.connections(): if cux.status == psutil.CONN_LISTEN: protport['ssh'] = cux.laddr.port for service in psutil.win_service_iter(): if 'TermService' in service.name(): service_handler = psutil.win_service_get('TermService') if service_handler.status() == 'running': pid = service_handler.pid() process_handler = psutil.Process(pid) for cux in process_handler.connections(): if cux.status == psutil.CONN_LISTEN: protport['rdp'] = cux.laddr.port elif sys.platform.startswith('linux'): for process in psutil.process_iter(): if 'Xvnc' in process.name(): process_handler = psutil.Process(process.pid) for cux in process_handler.connections(): try: ip = cux.laddr[0] port = cux.laddr[1] except Exception: ip = cux.laddr.ip port = cux.laddr.port if cux.status == psutil.CONN_LISTEN and ip == "0.0.0.0": protport['vnc'] = port elif 'sshd' in process.name(): process_handler = psutil.Process(process.pid) for cux in process_handler.connections(): try: ip = cux.laddr[0] port = cux.laddr[1] except Exception: ip = cux.laddr.ip port = cux.laddr.port if cux.status == psutil.CONN_LISTEN and ip == "0.0.0.0": protport['ssh'] = port elif sys.platform.startswith('darwin'): for process in psutil.process_iter(): if 'ARDAgent' in process.name(): protport['vnc'] = '5900' for cux in psutil.net_connections(): if cux.laddr.port == 22 and cux.status == psutil.CONN_LISTEN: protport['ssh'] = '22' return protport
def _get_workers(self):
    """Get the list of processes in which WSGI server is running."""
    if self.workers > 0:
        return [proc.pid for proc in psutil.process_iter()
                if proc.ppid() == self.service_pid]
    else:
        return [proc.pid for proc in psutil.process_iter()
                if proc.pid == self.service_pid]
def get_heavy_tasks(thr, t=1):
    from time import sleep
    from psutil import process_iter

    # Prime cpu_percent() so the second call returns a meaningful value.
    [p.cpu_percent() for p in process_iter()]
    sleep(t)
    r = []
    for p in process_iter():
        cpu = p.cpu_percent(None)
        if cpu > thr:  # threshold in percent
            print("{pid:<7} {name:<12} {cpu}% CPU".format(pid=p.pid, name=p.name(), cpu=cpu))
            r.append(p.pid)
    return r
def initBBGSession(host="LocalHost" , port=8194,start_service=True): # Fill SessionOptions pythons_psutil = [] for p in psutil.process_iter(): try: if p.name() == 'bbcomm.exe': pythons_psutil.append(p) except psutil.Error: pass if (np.size(pythons_psutil)==0) and start_service: pid = s.Popen('C:\\blp\\API\\bbcomm.exe',shell=True).pid #retcode = s.call('C:\\blp\\API\\bbcomm.exe',shell=False) not_started=True while not_started: print 'Waiting for bbcomm to start' t.sleep(2) for p in psutil.process_iter(): try: if p.name() == 'bbcomm.exe': print 'Successfully started bbcomm.exe' not_started=False except psutil.Error: pass else: print "Found bbcomm.exe, no need to launch it" sessionOptions = blpapi.SessionOptions() sessionOptions.setServerHost(host) sessionOptions.setServerPort(port) print "Connecting to %s:%s" % (host, port) # Create a Session session = blpapi.Session(sessionOptions) # Start a Session if not session.start(): print "Failed to start session." return try: # Open service to get historical data from if not session.openService("//blp/refdata"): print "Failed to open //blp/refdata" return except: session.stop() #end of openSession return session
def get_process(pids=None, names=None): """ Return a list of specified local processes. Arguments: pids - list of process PIDs to get; names - list of process names to get. Return: a list of process dicts. """ if not pids and not names: processes = [process for process in psutil.process_iter()] else: pids = parse_pids(pids) names = parse_args_list(names) processes = [psutil.Process(pid) for pid in pids if psutil.pid_exists(pid)] if names and not pids: # Do not add current python process to result list. cur_pid = getpid() local_processes = [proc for proc in psutil.process_iter() if proc.pid != cur_pid] for name in names: for process in local_processes: try: if fnmatch(process.name(), name) or fnmatch( ' '.join(process.cmdline()), name): processes.append(process) except psutil.AccessDenied: pass result = [] for process in processes: try: try: hostname = gethostbyname(gethostname()) except gaierror: hostname = gethostbyname('localhost') temp = { 'pid': process.pid, 'name': process.name(), 'status': str(process.status()), 'cmd': ' '.join(process.cmdline()), 'node': str(getnode()), 'endpoint': hostname } if pids or names: temp['cpu'] = process.cpu_percent() / psutil.cpu_count() temp['ram'] = long(process.memory_info()[0]) / 1024 if temp not in result: result.append(temp) except (psutil.NoSuchProcess, psutil.AccessDenied): print 'NoSuchProcess or AccessDenied exception occurred' return result
def _get_workers(self):
    """Get the list of processes in which WSGI server is running."""

    def safe_ppid(proc):
        try:
            return proc.ppid()
        except psutil.NoSuchProcess:
            return None

    if self.workers > 0:
        return [proc.pid for proc in psutil.process_iter()
                if safe_ppid(proc) == self.service_pid]
    else:
        return [proc.pid for proc in psutil.process_iter()
                if proc.pid == self.service_pid]
def main(*args) -> None: resources.init('jacklaxson', 'austere') j = resources.user.read("config.json") light_browser = json.loads(j)['light_browser'] use_light = False # check for steam games running overlay = [x for x in psutil.process_iter() if "gameoverlayui" in x.name()] if len(overlay) > 0: # all of the gameoverlayui's have the pid game_pid = int(overlay[0].cmdline()[2]) logger.info("Detected game %s", psutil.Process(pid=game_pid).name()) use_light = True # check for big picture games # they're direct descendants of steam elif len(overlay) == 0: for proc in psutil.process_iter(): if proc.name() == "steam": z = list(filter(lambda x: x.name() not in ['steamwebhelper','steam','sh', 'SteamChildMonit'], proc.children(recursive=True))) if len(z) == 1: logger.info("Detected game %s", z[0]) use_light = True elif len(z) == 0: logger.info("Found no games running in big picture mode") else: logger.error("Found more than one potential game process, this behavior is undefined") logger.info(z) elif proc.name() == 'Battle.net.exe': z = list(filter(lambda x: x.name() not in ['Battle.net Helper.exe', 'CrashMailer_64.exe'], proc.children(recursive=True))) if len(z) == 1: logger.info("Detected game %s", z[0]) use_light = True else: logger.info(z) logger.info("battlenet children: %s", proc.children(recursive=True)) # check if we're almost out of memory elif psutil.virtual_memory().percent > 90: use_light = True # check battery info if use_light: if platform.system() == "Windows": pass else: subprocess.call([light_browser, sys.argv[1]]) else: if platform.system() == "Windows": subprocess.call(['powershell.exe', '-Command', 'start {}'.format(sys.argv[1])]) else: subprocess.call(['x-www-browser', sys.argv[1]]) logger.debug(args)
def checkStreams(self): report = [] self.l.log(3, "checkStreams started") for p in psutil.process_iter(): cmd = " ".join(psutil.Process(p.pid).cmdline) if p.name == "vlc" and "dummy" not in cmd: self.l.log(3, "checkStreams: %s (%s)" % (p, cmd)) kill = 1 for con in p.get_connections(kind="tcp"): if con.family == socket.AF_INET and con.status == "LISTEN": port = con.local_address[1] for proxycon in p.get_connections(kind="tcp"): if proxycon.family == socket.AF_INET and proxycon.status == "ESTABLISHED" and proxycon.local_address[1] == port: kill = 0 # append to kill list self.l.log(3, "checkStreams: %s" % [p.pid, kill]) report.append([p.pid, kill]) # check nomination list for non-existable pid's for nproc in self.nomKill: exist = 0 for p in psutil.process_iter(): cmd = " ".join(psutil.Process(p.pid).cmdline) if p.name == "vlc" and "dummy" not in cmd: if p.pid == nproc: exist = 1 if exist == 0: self.nomKill.remove(nproc) # kill process if not used msg = "" oldNom = self.nomKill[:] for proc in report: if proc[1] == 1 and proc[0] in self.nomKill: process = psutil.Process(proc[0]) process.kill() msg += "killed %s\n" % proc[0] self.nomKill.remove(proc[0]) elif proc[0] in self.nomKill: self.nomKill.remove(proc[0]) msg += "remove %s from nom\n" % proc[0] elif proc[1] == 1: self.nomKill.append(proc[0]) msg += "add %s to nom\n" % proc[0] else: msg += "nothing to do for %s\n" % proc[0] return {"report":report,"oldnom":oldNom,"nominatie":self.nomKill,"msg":msg}
def test_process_iter(self):
    self.assertIn(os.getpid(), [x.pid for x in psutil.process_iter()])
    sproc = get_test_subprocess()
    self.assertIn(sproc.pid, [x.pid for x in psutil.process_iter()])
    p = psutil.Process(sproc.pid)
    p.kill()
    p.wait()
    self.assertNotIn(sproc.pid, [x.pid for x in psutil.process_iter()])

    with mock.patch("psutil.Process",
                    side_effect=psutil.NoSuchProcess(os.getpid())):
        self.assertEqual(list(psutil.process_iter()), [])
    with mock.patch("psutil.Process",
                    side_effect=psutil.AccessDenied(os.getpid())):
        with self.assertRaises(psutil.AccessDenied):
            list(psutil.process_iter())
def killtasks(procnames):
    for proc in psutil.process_iter():
        if proc.name() in procnames:
            pid = str(proc.as_dict(attrs=['pid'])['pid'])
            name = proc.as_dict(attrs=['name'])['name']
            print("stopping... " + name + " (pid:" + pid + ")")
            subprocess.call(["sudo", "kill", "-15", pid])
    kodiproc = ["kodi", "kodi.bin"]
    # kodi needs SIGKILL -9 to close
    for proc in psutil.process_iter():
        if proc.name() in kodiproc:
            pid = str(proc.as_dict(attrs=['pid'])['pid'])
            name = proc.as_dict(attrs=['name'])['name']
            print("stopping... " + name + " (pid:" + pid + ")")
            subprocess.call(["sudo", "kill", "-9", pid])
def main(): print psutil.cpu_percent() for proc in psutil.process_iter(): try: pinfo = proc.as_dict(attrs=['username', 'pid', 'name', 'cpu_percent', 'memory_percent']) except psutil.NoSuchProcess: pass global mem_usage_memory_profiler parser = argparse.ArgumentParser() parser.add_argument('--process-name', '-n', dest='process_name_dest', help='which process you want to test CPU and Memory', default='sonard', type=str, required=False) args = parser.parse_args() process_name = args.process_name_dest print psutil.process_iter() for proc in psutil.process_iter(): if proc.name() == process_name: print 'test process: %s PID: %s ' % (proc, proc.pid) procids.append(proc.pid) cpu_percent_usage[proc.pid]=[] mem_usage_psutil[proc.pid]=[] test_cpu_mem_percent(process_name, interval=0.5) print_mem(proc_name, 0.5, 20) # init_thread_measure('t_cpu_mem', test_cpu_mem_percent, (process_name,0.5), True ) percs = psutil.cpu_percent(interval=0, percpu=True) print 'CPU Used in %d Processor CPU: %s' % (len(percs), cpu_percent_usage) print 'Memory Percentage Used Tested By psutil: ', mem_usage_psutil print '\n' print 'Memory Tested By Memory_Profiler: ', mem_usage_memory_profiler print '\n' print '############################################################' print 'Test By psutil' # for (k, v) in mem_usage_psutil.items(): # print 'PID: %d' % k # print_avg_max(v) print '############################################################' print 'Test By Memory_Profiler'
def _crawl_metrics(self): assert(self.crawl_mode is not Modes.OUTCONTAINER) created_since = 0 logger.debug('Crawling Metrics') for p in psutil.process_iter(): create_time = ( p.create_time() if hasattr( p.create_time, '__call__') else p.create_time) if create_time <= created_since: continue try: name = (p.name() if hasattr(p.name, '__call__' ) else p.name) pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) status = (p.status() if hasattr(p.status, '__call__' ) else p.status) if status == psutil.STATUS_ZOMBIE: continue username = ( p.username() if hasattr( p.username, '__call__') else p.username) meminfo = ( p.get_memory_info() if hasattr( p.get_memory_info, '__call__') else p.memory_info) ioinfo = ( p.get_io_counters() if hasattr( p.get_io_counters, '__call__') else p.io_counters) cpu_percent = ( p.get_cpu_percent( interval=0) if hasattr( p.get_cpu_percent, '__call__') else p.cpu_percent) memory_percent = ( p.get_memory_percent() if hasattr( p.get_memory_percent, '__call__') else p.memory_percent) feature_key = '{0}/{1}'.format(name, pid) yield (feature_key, MetricFeature( round(cpu_percent, 2), round(memory_percent, 2), name, pid, ioinfo.read_bytes, meminfo.rss, str(status), username, meminfo.vms, ioinfo.write_bytes, )) except Exception as e: logger.error('Error crawling metric for process %s' % pid, exc_info=True) raise CrawlError(e)
def find_processes(): global success """Find_process is meant to validate that all the required processes are running""" process_missing = [] process_list = config['default']['check']['expected_processes'] for process in process_list: process_found_flag = False for item in psutil.process_iter(): for cmd in item.cmdline(): if process in cmd: process_found_flag = True break if not process_found_flag: process_missing.append(process) if len(process_missing) > 0: # if processes were not found print (error + ' Process = {} Not Found' .format(process_missing)) success = False else: print(successful + ' All Processes are running.')
def abort(job_id): """ Abort the given job """ job = logs.dbcmd('get_job', job_id) # job_id can be negative if job is None: print('There is no job %d' % job_id) return elif job.status not in ('executing', 'running'): print('Job %d is %s' % (job.id, job.status)) return name = 'oq-job-%d' % job.id for p in psutil.process_iter(): if p.name() == name: try: os.kill(p.pid, signal.SIGINT) logs.dbcmd('set_status', job.id, 'aborted') print('Job %d aborted' % job.id) except Exception as exc: print(exc) break else: # no break # set job as failed if it is set as 'executing' or 'running' in the db # but the corresponding process is not running anymore logs.dbcmd('set_status', job.id, 'failed') print('Unable to find a process for job %d,' ' setting it as failed' % job.id)
def GetAllAdb():
    for p in psutil.process_iter():
        try:
            if 'adb' in p.name():
                yield p
        except psutil.NoSuchProcess:
            pass
def _get_finder_cpu_usage(self):
    """Obtain the CPU usage of the Finder app on OS X.

    This is used to detect high CPU usage.
    """
    if not sys.platform.startswith('darwin'):
        return None
    if not psutil:
        return None

    for proc in psutil.process_iter():
        if proc.name() != 'Finder':
            continue
        if proc.username() != getpass.getuser():
            continue
        # Try to isolate system finder as opposed to other "Finder"
        # processes.
        if not proc.exe().endswith('CoreServices/Finder.app/Contents/MacOS/Finder'):
            continue
        return proc.cpu_times()
    return None
def check_process_exists(name):
    # Materialize the matches: in Python 3, filter() returns a lazy iterator,
    # which is always truthy, so the emptiness check below would never fire.
    processes = [p for p in psutil.process_iter() if check_process_name(name, p)]
    if not processes:
        critical("%s is not running" % name)
    ok("%s is working." % name)
.exe()             # path to the process binary
.cwd()             # process working directory
.status()          # process status
.create_time()     # creation time
.uids()            # uid info
.gids()            # gid info
.cpu_times()       # CPU time info, including user and system
.cpu_affinity()    # CPU affinity
.memory_percent()  # memory usage percentage
.memory_info()     # memory info: rss, vms
.io_counters()     # I/O counters
.connections()     # list of connections
.num_threads()     # number of threads started
.as_dict(attrs=['name','pid'])  # convert the values listed in attrs into a dict

ps.process_iter()               # create an iterator over all processes
ps.win_service_iter()           # create an iterator over all Windows services
ps.win_service_get("sername")   # look up a service by name; raises psutil.NoSuchProcess if not found
    .as_dict()         # output the service info as a dict
    .binpath()         # path of the service executable
    .description()     # service description
    .display_name()    # service display name
    .name()            # service name
    .pid()             # service PID (None if the service is not started)
    .start_type()      # start type
    .status()          # service status
    .username()        # user name

# Tracking information about a program
from subprocess import PIPE
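A minimal sketch tying the calls above together: it walks all processes with `process_iter()` and pulls a few fields through `as_dict()`, skipping processes that disappear mid-iteration. Nothing here is tied to any particular snippet in this collection; the attribute names are the standard psutil ones, and the output formatting is just illustrative.

```python
import psutil as ps

# Sketch: print pid, name and memory usage of every visible process.
for proc in ps.process_iter():
    try:
        info = proc.as_dict(attrs=['pid', 'name', 'memory_percent'])
    except ps.NoSuchProcess:
        continue  # process exited between listing and inspection
    mem = info['memory_percent']
    print('{:>7}  {:<25} {}'.format(
        info['pid'], info['name'],
        '{:.1f}%'.format(mem) if mem is not None else 'n/a'))
```

`as_dict()` substitutes `None` (or the given `ad_value`) for fields it cannot read, which is why the memory value is checked before formatting.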
def _check_for_game_process(self, game):
    """Check all processes, because on macOS games are not spawned as client children"""
    for proc in psutil.process_iter(attrs=['exe'], ad_value=''):
        if proc.info['exe'] in game.execs:
            return True
    return False
def test_worker_stats(shutdown_only): ray.init(num_cpus=1, include_dashboard=True) raylet = ray.nodes()[0] num_cpus = raylet["Resources"]["CPU"] raylet_address = "{}:{}".format( raylet["NodeManagerAddress"], ray.nodes()[0]["NodeManagerPort"] ) channel = init_grpc_channel(raylet_address) stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel) def try_get_node_stats(num_retry=5, timeout=2): reply = None for _ in range(num_retry): try: reply = stub.GetNodeStats( node_manager_pb2.GetNodeStatsRequest(), timeout=timeout ) break except grpc.RpcError: continue assert reply is not None return reply reply = try_get_node_stats() # Check that there is one connected driver. drivers = [ worker for worker in reply.core_workers_stats if worker.worker_type == common_pb2.DRIVER ] assert len(drivers) == 1 assert os.getpid() == drivers[0].pid @ray.remote def f(): ray._private.worker.show_in_dashboard("test") return os.getpid() @ray.remote class Actor: def __init__(self): pass def f(self): ray._private.worker.show_in_dashboard("test") return os.getpid() # Test show_in_dashboard for remote functions. worker_pid = ray.get(f.remote()) reply = try_get_node_stats() target_worker_present = False for stats in reply.core_workers_stats: if stats.webui_display[""] == '{"message": "test", "dtype": "text"}': target_worker_present = True assert stats.pid == worker_pid else: assert stats.webui_display[""] == "" # Empty proto assert target_worker_present # Test show_in_dashboard for remote actors. a = Actor.remote() worker_pid = ray.get(a.f.remote()) reply = try_get_node_stats() target_worker_present = False for stats in reply.core_workers_stats: if stats.webui_display[""] == '{"message": "test", "dtype": "text"}': target_worker_present = True else: assert stats.webui_display[""] == "" # Empty proto assert target_worker_present if _WIN32: timeout_seconds = 40 else: timeout_seconds = 20 start_time = time.time() while True: if time.time() - start_time > timeout_seconds: raise RayTestTimeoutException( "Timed out while waiting for worker processes" ) # Wait for the workers to start. if len(reply.core_workers_stats) < num_cpus + 2: time.sleep(1) reply = try_get_node_stats() print(reply) continue # Check that the rest of the processes are workers, 1 for each CPU. assert len(reply.core_workers_stats) == num_cpus + 2 # Check that all processes are Python. pids = [worker.pid for worker in reply.core_workers_stats] processes = [ p.info["name"] for p in psutil.process_iter(attrs=["pid", "name"]) if p.info["pid"] in pids ] for process in processes: # TODO(ekl) why does travis/mi end up in the process list assert ( "python" in process or "mini" in process or "conda" in process or "travis" in process or "runner" in process or "pytest" in process or "ray" in process ), process break
async def iotop(self, ctx): """Snapshot of I/O usage information output by the kernel""" if not hasattr(psutil.Process, "oneshot"): await ctx.send("Platform not supported") return # first get a list of all processes and disk io counters procs = [p for p in psutil.process_iter()] for p in procs[:]: try: p._before = p.io_counters() except psutil.Error: procs.remove(p) continue disks_before = psutil.disk_io_counters() # sleep some time await asyncio.sleep(1) # then retrieve the same info again for p in procs[:]: with p.oneshot(): try: p._after = p.io_counters() p._cmdline = ' '.join(p.cmdline()) if not p._cmdline: p._cmdline = p.name() p._username = p.username() except (psutil.NoSuchProcess, psutil.ZombieProcess, psutil.AccessDenied): procs.remove(p) disks_after = psutil.disk_io_counters() # finally calculate results by comparing data before and # after the interval for p in procs: p._read_per_sec = p._after.read_bytes - p._before.read_bytes p._write_per_sec = p._after.write_bytes - p._before.write_bytes p._total = p._read_per_sec + p._write_per_sec disks_read_per_sec = disks_after.read_bytes - disks_before.read_bytes disks_write_per_sec = disks_after.write_bytes - disks_before.write_bytes # sort processes by total disk IO so that the more intensive # ones get listed first processes = sorted(procs, key=lambda p: p._total, reverse=True) # print results template = "{0:<5} {1:<7} {2:11} {3:11} {4}\n" msg = "Total DISK READ: {0} | Total DISK WRITE: {1}\n".format( self._size(disks_read_per_sec), self._size(disks_write_per_sec)) msg += template.format("PID", "USER", "DISK READ", "DISK WRITE", "COMMAND") for p in processes: msg += template.format( p.pid, p._username[:7], self._size(p._read_per_sec), self._size(p._write_per_sec), p._cmdline) await self._say(ctx, msg) return
def get_nginx_pids_list():
    for process in psutil.process_iter():
        if process.name() == 'nginx':
            yield process.pid
'''
File: auto_kill_vs.py
File Created: Wednesday, 4th April 2018 4:25:41 pm
Author: xss ([email protected])
Description: Auto kill the Visual Studio process
-----
Last Modified: Wednesday, 28th November 2018 4:59:51 pm
Modified By: xss ([email protected])
-----
'''
import psutil
import os
import time

# information = [p.info for p in psutil.process_iter(attrs=['pid', 'name']) if 'devenv' in p.info['name']]
# print(information)

while [p.info for p in psutil.process_iter(attrs=['pid', 'name'])
       if 'devenv' in p.info['name']]:
    os.system("taskkill /F /im devenv.exe")
    time.sleep(3)
def initialize_VPN(stored_settings=0,save=0,area_input=None,skip_settings=0): ###load stored settings if needed and set input_needed variables to zero if settings are provided### windows_pause = 3 additional_settings_needed = 1 additional_settings_list = list() if stored_settings == 1: instructions = saved_settings_check() additional_settings_needed = 0 input_needed = 0 elif area_input is not None: input_needed = 2 windows_pause = 8 else: input_needed = 1 ###performing system check### opsys = platform.system() ##windows## if opsys == "Windows": print("\33[33mYou're using Windows.\n" "Performing system check...\n" "###########################\n\33[0m") #seek and set windows installation path# option_1_path = 'C:/Program Files/NordVPN' option_2_path = 'C:/Program Files (x86)/NordVPN' custom_path = str() if path.exists(option_1_path) == True: cwd_path = option_1_path elif path.exists(option_2_path) == True: cwd_path = option_2_path else: custom_path = input("\x1b[93mIt looks like you've installed NordVPN in an uncommon folder. Would you mind telling me which folder? (e.g. D:/customfolder/nordvpn)\x1b[0m") while path.exists(custom_path) == False: custom_path = input("\x1b[93mI'm sorry, but this folder doesn't exist. Please double-check your input.\x1b[0m") while os.path.isfile(custom_path+"/NordVPN.exe") == False: custom_path = input("\x1b[93mI'm sorry, but the NordVPN application is not located in this folder. Please double-check your input.\x1b[0m") cwd_path = custom_path print("NordVPN installation check: \33[92m\N{check mark}\33[0m") #check if nordvpn service is already running in the background check_service = "nordvpn-service.exe" in (p.name() for p in psutil.process_iter()) if check_service is False: raise Exception("NordVPN service hasn't been initialized, please start this service in [task manager] --> [services] and restart your script") print("NordVPN service check: \33[92m\N{check mark}\33[0m") # start NordVPN app and disconnect from VPN service if necessary# print("Opening NordVPN app and disconnecting if necessary...") open_nord_win = subprocess.Popen(["nordvpn", "-d"],shell=True,cwd=cwd_path,stdout=DEVNULL) while ("NordVPN.exe" in (p.name() for p in psutil.process_iter())) == False: time.sleep(windows_pause) open_nord_win.kill() print("NordVPN app launched: \33[92m\N{check mark}\33[0m") print("#####################################") ##linux## elif opsys == "Linux": print("\n\33[33mYou're using Linux.\n" "Performing system check...\n" "###########################\n\33[0m") #check if nordvpn is installed on linux# check_nord_linux = check_output(["nordvpn"]) if len(check_nord_linux) > 0: print("NordVPN installation check: \33[92m\N{check mark}\33[0m") else: raise Exception("NordVPN is not installed on your Linux machine.\n" "Follow instructions on shorturl.at/ioDQ2 to install the NordVpn app.") #check if user is logged in. If not, ask for credentials and log in or use credentials from stored settings if available.# check_nord_linux_acc = str(check_output(["nordvpn","account"])) if "not logged in" in check_nord_linux_acc: login_needed = 1 while login_needed == 1: login_message = input("\n\033[34mYou are not logged in. 
Please provide your credentials in the form of LOGIN/PASSWORD\n\033[0m") try: if instructions['credentials'] in locals(): credentials = stored_settings['credentials'] else: credentials = login_message except: credentials = login_message finally: try: login = credentials.split("/")[0] password = credentials.split("/")[1] except IndexError: error_login = input("\n\033[34mYou have provided your credentials in the wrong format. Press enter and please try again.\n" "Your input should look something like this: [email protected]/password\033[0m") else: login_needed = 0 try: login_nordvpn = check_output(["nordvpn","login","-u",login,"-p",password]) except subprocess.CalledProcessError: raise Exception("\nSorry,something went wrong while trying to log in\n") if "Welcome" in str(login_nordvpn): print("\n\n\033[34mLogin successful!\n\033[0m\n") pass else: raise Exception("\nSorry, NordVPN throws an unexpected message, namely:\n"+str(login_nordvpn)) else: print("NordVPN login check: \33[92m\N{check mark}\33[0m") #provide opportunity to execute additional settings.# settings_input_message = "\n\033[34mDo you want to execute additional settings?\033[0m" while additional_settings_needed == 1 and skip_settings == 0: additional_settings = input(settings_input_message+ "\n_________________________\n\n" "Press enter to continue\n" "Type 'help' for available options\n").strip() if additional_settings == "help": options_linux = pkg_resources.open_text(NordVPN_options, 'options_linux.txt').read().split('\n') for line in options_linux: print(line) additional_settings = input("").strip() additional_settings = str(additional_settings).split(" ") if len(additional_settings[0]) > 0: settings_input_message = additional_settings_linux(additional_settings) if any(re.findall(r'done|already been executed', settings_input_message,re.IGNORECASE)): additional_settings_list.append(additional_settings) else: additional_settings_needed = 0 #however, if provided, just skip the additional settings option and execute the stored settings.# if 'instructions' in locals(): if len(instructions['additional_settings'][0][0]) > 0: print("Executing stored additional settings....\n") for count,instruction in enumerate(instructions['additional_settings']): print("Executing stored setting #"+str(count+1)+": "+str(instruction)) additional_settings_linux(instruction) else: pass else: raise Exception("I'm sorry, NordVPN switcher only works for Windows and Linux machines.") ###provide settings for VPN rotation### ##open available options and store these in a dict## areas_list = pkg_resources.open_text(NordVPN_options, 'countrylist.txt').read().split('\n') country_dict = {'countries':areas_list[0:60],'europe': areas_list[0:36], 'americas': areas_list[36:44], 'africa east india': areas_list[49:60],'asia pacific': areas_list[49:60], 'regions australia': areas_list[60:65],'regions canada': areas_list[65:68], 'regions germany': areas_list[68:70], 'regions india': areas_list[70:72], 'regions united states': areas_list[72:87],'special groups':areas_list[87:len(areas_list)]} ##provide input if needed## while input_needed > 0: if input_needed == 2: print("\nYou've entered a list of connection options. Checking list...\n") try: settings_servers = [area.lower() for area in area_input] settings_servers = ",".join(settings_servers) except TypeError: raise Exception("I expected a list here. 
Are you sure you've not entered a string or some other object?\n ") else: settings_servers = input("\n\033[34mI want to connect to...\n" "_________________________\n" "Type 'help' for available options\n\033[0m").strip().lower() #define help menu# if settings_servers.lower().strip() == 'help': #notation for specific servers differs between Windows and Linux.# if opsys == "Windows": notation_specific_server = " (e.g. Netherlands #742,Belgium #166)\n" else: notation_specific_server = " (e.g. nl742,be166)\n" settings_servers = input("\nOptions:\n" "##########\n" "* type 'quick' to choose quickconnect \n" "* type 'complete rotation' to rotate between all available NordVPN servers\n" "* Single country or local region (e.g.Germany)\n" "* Regions within country (e.g. regions united states')\n" "* World regions (europe/americas/africa east india/asia pacific)\n" "* Random multiple countries and/or local regions (e.g.France,Netherlands,Chicago)\n" "* Random n countries (e.g. random countries 10)\n" "* Random n countries within larger region (e.g. random countries europe 5)\n" "* Random n regions in country (e.g. random regions United States 6)\n"\ "* Specialty group name (e.g. Dedicated IP,Double VPN)\n" "* Specific list of servers"+notation_specific_server).strip().lower() #set base command according to running os# if opsys == "Windows": nordvpn_command = ["nordvpn", "-c"] if opsys == "Linux": nordvpn_command = ["nordvpn", "c"] #create sample of regions from input.# #1. if quick connect# if settings_servers == "quick": if input_needed == 1: quickconnect_check = input("\nYou are choosing for the quick connect option. Are you sure? (y/n)\n") if 'y' in quickconnect_check: sample_countries = [""] input_needed = 0 pass else: print("\nYou are choosing for the quick connect option.\n") #2. if completely random rotation elif settings_servers == 'complete rotation': print("\nFetching list of all current NordVPN servers...\n") for i in range(5): try: filtered_servers = get_nordvpn_servers() if opsys == "Windows": nordvpn_command.append("-n") sample_countries = filtered_servers['windows_names'] else: sample_countries = filtered_servers['linux_names'] except: print('\nI was unable to fetch the current NordVPN serverlist from https://nordvpn.com/api/server. Check your internet connection.\nRetrying in 60 seconds...\n') continue else: input_needed = 0 break else: raise Exception("\nI'm unable to fetch the current NordVPN serverlist. Check your internet connection.\n") #3. if provided specific servers. Notation differs for Windows and Linux machines, so two options are checked (first is Windows, second is Linux# elif "#" in settings_servers or re.compile(r'^[a-zA-Z]+[0-9]+').search(settings_servers.split(',')[0]) is not None: if opsys == "Windows": nordvpn_command.append("-n") sample_countries = [area.strip() for area in settings_servers.split(',')] input_needed = 0 else: #3. If connecting to some specific group of servers# if opsys == "Windows": nordvpn_command.append("-g") #3.1 if asked for random sample, pull a sample.# if "random" in settings_servers: #determine sample size# samplesize = int(re.sub("[^0-9]", "", settings_servers).strip()) #3.1.1 if asked for random regions within country (e.g. 
random regions from United States,Australia,...)# if "regions" in settings_servers: try: sample_countries = country_dict[re.sub("random", "", settings_servers).rstrip('0123456789.- ').lower().strip()] input_needed = 0 except: input("\n\nThere are no specific regions available in this country, please try again.\nPress enter to continue.\n") if input_needed == 2: input_needed = 1 continue if re.compile(r'[^0-9]').search(settings_servers.strip()): sample_countries = random.sample(sample_countries, samplesize) #3.1.2 if asked for random countries within larger region# elif any(re.findall(r'europe|americas|africa east india|asia pacific', settings_servers)): larger_region = country_dict[re.sub("random|countries", "", settings_servers).rstrip('0123456789.- ').lower().strip()] sample_countries = random.sample(larger_region,samplesize) input_needed = 0 #3.1.3 if asked for random countries globally# else: if re.compile(r'[^0-9]').search(settings_servers.strip()): sample_countries = random.sample(country_dict['countries'], samplesize) input_needed = 0 else: sample_countries = country_dict['countries'] input_needed = 0 #4. If asked for specific region (e.g. europe)# elif settings_servers in country_dict.keys(): sample_countries = country_dict[settings_servers] input_needed = 0 #5. If asked for specific countries or regions (e.g.netherlands)# else: #check for empty input first.# if settings_servers == "": input("\n\nYou must provide some kind of input.\nPress enter to continue and then type 'help' to view the available options.\n") if input_needed == 2: input_needed = 1 continue else: sample_countries = [area.strip() for area in settings_servers.split(',')] #take into account possible superfluous spaces# approved_regions = 0 for region in sample_countries: if region in [area.lower() for area in areas_list]: approved_regions = approved_regions + 1 pass else: input("\n\nThe region/group " + region + " is not available. Please check for spelling errors.\nPress enter to continue.\n") if input_needed == 2: input_needed = 1 continue if approved_regions == len(sample_countries): input_needed = 0 ##if user does not use preloaded settings## if "instructions" not in locals(): #1.add underscore if spaces are present on Linux os# for number,element in enumerate(sample_countries): if element.count(" ") > 0 and opsys == "Linux": sample_countries[number] = re.sub(" ","_",element) else: pass #2.create instructions dict object# instructions = {'opsys':opsys,'command':nordvpn_command,'settings':sample_countries} if opsys == "Windows": instructions['cwd_path'] = cwd_path if opsys == "Linux": instructions['additional_settings'] = additional_settings_list if 'credentials' in locals(): instructions['credentials'] = credentials #3.save the settings if requested into .txt file in project folder# if save == 1: print("\nSaving settings in project folder...\n") try: os.remove("settings_nordvpn.txt") except FileNotFoundError: pass instructions_write = json.dumps(instructions) f = open("settings_nordvpn.txt", "w") f.write(instructions_write) f.close() print("\nDone!\n") return instructions
def list_all_containers( user_list='ALL', namespace_opts={}, ): """ Returns a list of all running containers, as `Container` objects. A running container is defined as a process subtree with the `pid` namespace different to the `init` process `pid` namespace. """ all_docker_containers = list_docker_containers(namespace_opts) if user_list in ['ALL', 'all', 'All']: init_ns = namespace.get_pid_namespace(1) visited_ns = set() # visited PID namespaces # Start with all docker containers for container in all_docker_containers: curr_ns = namespace.get_pid_namespace(container.pid) if not curr_ns: continue if curr_ns not in visited_ns and curr_ns != init_ns: visited_ns.add(curr_ns) try: yield container except ContainerInvalidEnvironment as e: logger.exception(e) # Continue with all other containers not known to docker for p in psutil.process_iter(): pid = (p.pid() if hasattr(p.pid, '__call__') else p.pid) if pid == 1 or pid == '1': # don't confuse the init process as a container continue if misc.process_is_crawler(p): # don't confuse the crawler process with a container continue curr_ns = namespace.get_pid_namespace(pid) if not curr_ns: # invalid container continue if curr_ns not in visited_ns and curr_ns != init_ns: visited_ns.add(curr_ns) yield Container(pid) else: # User provided a list of containers user_containers = user_list.split(',') for container in all_docker_containers: short_id_match = container.short_id in user_containers long_id_match = container.long_id in user_containers if short_id_match or long_id_match: yield container
def process_is_running(process_id):
    for process in psutil.process_iter():
        if process.pid == process_id:
            return True
    return False
def is_running(self):
    for proc in psutil.process_iter():
        if proc.name() == 'indiserver':
            return True
    return False
def reset(self): """ Reset energy meter and start sampling from channels specified in the target configuration. """ # Terminate already running iio-capture instance (if any) wait_for_termination = 0 for proc in psutil.process_iter(): if self._iiocapturebin not in proc.cmdline(): continue for channel in self._channels: if self._iio_device(channel) in proc.cmdline(): logging.debug('%14s - Killing previous iio-capture for [%s]', 'ACME', self._iio_device(channel)) logging.debug('%14s - %s', 'ACME', proc.cmdline()) proc.kill() wait_for_termination = 2 # Wait for previous instances to be killed sleep(wait_for_termination) # Start iio-capture for all channels required for channel in self._channels: ch_id = self._channels[channel] # Setup CSV file to collect samples for this channel csv_file = '{}/{}'.format( self._res_dir, 'samples_{}.csv'.format(channel) ) # Start a dedicated iio-capture instance for this channel self._iio[ch_id] = Popen([self._iiocapturebin, '-n', self._hostname, '-o', '-c', '-f', csv_file, self._iio_device(channel)], stdout=PIPE, stderr=STDOUT) # Wait few milliseconds before to check if there is any output sleep(1) # Check that all required channels have been started for channel in self._channels: ch_id = self._channels[channel] self._iio[ch_id].poll() if self._iio[ch_id].returncode: logging.error('%14s - Failed to run %s for %s', 'ACME', self._iiocapturebin, self._str(channel)) logging.warning('\n\n'\ ' Make sure there are no iio-capture processes\n'\ ' connected to %s and device %s\n', self._hostname, self._str(channel)) out, _ = self._iio[ch_id].communicate() logging.error('%14s - Output: [%s]', 'ACME', out.strip()) self._iio[ch_id] = None raise RuntimeError('iio-capture connection error') logging.debug('%14s - Started %s on %s...', 'ACME', self._iiocapturebin, self._str(channel))
async def top(self, ctx): """Snapshot of real-time system information and tasks""" # sleep some time psutil.cpu_percent(interval=None, percpu=True) await asyncio.sleep(1) procs = [] procs_status = {} for p in psutil.process_iter(): try: p.dict = p.as_dict(['username', 'nice', 'memory_info', 'memory_percent', 'cpu_percent', 'cpu_times', 'name', 'status']) try: procs_status[p.dict['status']] += 1 except KeyError: procs_status[p.dict['status']] = 1 except psutil.NoSuchProcess: pass else: procs.append(p) # return processes sorted by CPU percent usage processes = sorted(procs, key=lambda p: p.dict['cpu_percent'], reverse=True) # Print system-related info, above the process list msg = "" num_procs = len(procs) def get_dashes(perc): dashes = "|" * int((float(perc) / 10 * 4)) empty_dashes = " " * (40 - len(dashes)) return dashes, empty_dashes # cpu usage percs = psutil.cpu_percent(interval=0, percpu=True) for cpu_num, perc in enumerate(percs): dashes, empty_dashes = get_dashes(perc) msg += " CPU{0:<2} [{1}{2}] {3:>5}%\n".format(cpu_num, dashes, empty_dashes, perc) mem = psutil.virtual_memory() dashes, empty_dashes = get_dashes(mem.percent) msg += " Mem [{0}{1}] {2:>5}% {3:>6} / {4}\n".format( dashes, empty_dashes, mem.percent, str(int(mem.used / 1024 / 1024)) + "M", str(int(mem.total / 1024 / 1024)) + "M" ) # swap usage swap = psutil.swap_memory() dashes, empty_dashes = get_dashes(swap.percent) msg += " Swap [{0}{1}] {2:>5}% {3:>6} / {4}\n".format( dashes, empty_dashes, swap.percent, str(int(swap.used / 1024 / 1024)) + "M", str(int(swap.total / 1024 / 1024)) + "M" ) # processes number and status st = [] for x, y in procs_status.items(): if y: st.append("%s=%s" % (x, y)) st.sort(key=lambda x: x[:3] in ('run', 'sle'), reverse=True) msg += " Processes: {0} ({1})\n".format(num_procs, ', '.join(st)) # load average, uptime uptime = datetime.datetime.now() - datetime.datetime.fromtimestamp(psutil.boot_time()) if not hasattr(os, "getloadavg"): msg += " Load average: N/A Uptime: {0}".format( str(uptime).split('.')[0]) else: av1, av2, av3 = os.getloadavg() msg += " Load average: {0:.2f} {1:.2f} {2:.2f} Uptime: {3}".format( av1, av2, av3, str(uptime).split('.')[0]) await self._say(ctx, msg) # print processes template = "{0:<6} {1:<9} {2:>5} {3:>8} {4:>8} {5:>8} {6:>6} {7:>10} {8:>2}\n" msg = template.format("PID", "USER", "NI", "VIRT", "RES", "CPU%", "MEM%", "TIME+", "NAME") for p in processes: # TIME+ column shows process CPU cumulative time and it # is expressed as: "mm:ss.ms" if p.dict['cpu_times'] is not None: ctime = datetime.timedelta(seconds=sum(p.dict['cpu_times'])) ctime = "%s:%s.%s" % (ctime.seconds // 60 % 60, str((ctime.seconds % 60)).zfill(2), str(ctime.microseconds)[:2]) else: ctime = '' if p.dict['memory_percent'] is not None: p.dict['memory_percent'] = round(p.dict['memory_percent'], 1) else: p.dict['memory_percent'] = '' if p.dict['cpu_percent'] is None: p.dict['cpu_percent'] = '' if p.dict['username']: username = p.dict['username'][:8] else: username = '' msg += template.format(p.pid, username, p.dict['nice'] or '', self._size(getattr(p.dict['memory_info'], 'vms', 0)), self._size(getattr(p.dict['memory_info'], 'rss', 0)), p.dict['cpu_percent'], p.dict['memory_percent'], ctime, p.dict['name'] or '') await self._say(ctx, msg) return
async def ps(self, ctx): """Information about active processes""" PROC_STATUSES_RAW = { psutil.STATUS_RUNNING: "R", psutil.STATUS_SLEEPING: "S", psutil.STATUS_DISK_SLEEP: "D", psutil.STATUS_STOPPED: "T", psutil.STATUS_TRACING_STOP: "t", psutil.STATUS_ZOMBIE: "Z", psutil.STATUS_DEAD: "X", psutil.STATUS_WAKING: "WA", psutil.STATUS_IDLE: "I", psutil.STATUS_LOCKED: "L", psutil.STATUS_WAITING: "W", } if hasattr(psutil, 'STATUS_WAKE_KILL'): PROC_STATUSES_RAW[psutil.STATUS_WAKE_KILL] = "WK" if hasattr(psutil, 'STATUS_SUSPENDED'): PROC_STATUSES_RAW[psutil.STATUS_SUSPENDED] = "V" today_day = datetime.date.today() template = "{0:<10} {1:>5} {2:>4} {3:>4} {4:>7} {5:>7} {6:>13} {7:>5} {8:>5} {9:>7} {10}\n" attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times', 'create_time', 'memory_info', 'status'] if os.name == 'posix': attrs.append('uids') attrs.append('terminal') msg = template.format("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY", "STAT", "START", "TIME", "COMMAND") for p in psutil.process_iter(): try: pinfo = p.as_dict(attrs, ad_value='') except psutil.NoSuchProcess: pass else: if pinfo['create_time']: ctime = datetime.datetime.fromtimestamp(pinfo['create_time']) if ctime.date() == today_day: ctime = ctime.strftime("%H:%M") else: ctime = ctime.strftime("%b%d") else: ctime = '' cputime = time.strftime("%M:%S", time.localtime(sum(pinfo['cpu_times']))) try: user = p.username() except KeyError: if os.name == 'posix': if pinfo['uids']: user = str(pinfo['uids'].real) else: user = '' else: raise except psutil.Error: user = '' if os.name == 'nt' and '\\' in user: user = user.split('\\')[1] vms = pinfo['memory_info'] and int(pinfo['memory_info'].vms / 1024) or '?' rss = pinfo['memory_info'] and int(pinfo['memory_info'].rss / 1024) or '?' memp = pinfo['memory_percent'] and round(pinfo['memory_percent'], 1) or '?' status = PROC_STATUSES_RAW.get(pinfo['status'], pinfo['status']) msg += template.format( user[:10], pinfo['pid'], pinfo['cpu_percent'], memp, vms, rss, pinfo.get('terminal', '') or '?', status, ctime, cputime, pinfo['name'].strip() or '?') await self._say(ctx, msg) return
def is_program_started(self):
    """Check if there is a process with the name "webots-bin" running."""
    return "webots-bin" in (p.name() for p in psutil.process_iter())
def GetProcess():
    processes = [p for p in psutil.process_iter() if p.name() == "Scan64.Exe"]
    for process_id in processes:
        print("[*] PID: %s" % process_id.pid)
        return process_id.pid
def get_nginx_listened_ports():
    nginx_pids = [x.pid for x in psutil.process_iter() if x.name() == 'nginx']
    for connection in psutil.net_connections(kind='tcp4'):
        if connection.status == "LISTEN" and connection.pid in nginx_pids:
            yield connection.laddr.port
def OnRestoreClick(self, event): pythons_psutil = [] opencpn_found = False openplotter_found = False openplotter_serial_found = False signalk_found = False for p in psutil.process_iter(): try: if p.name() == 'opencpn': opencpn_found = True if p.name() == 'openplotter': openplotter_found = True if p.name() == 'openplotter-serial': openplotter_serial_found = True if p.name() == 'signalk-server': signalk_found = True except psutil.Error: pass if opencpn_found == True: wx.MessageBox('OpenCPN is running. Please stop it before restoring a copy of the configuration file', 'Info', wx.OK | wx.ICON_INFORMATION) opencpn_found = False return if openplotter_found == True: wx.MessageBox('OpenPlotter is running. Please stop it before restoring a copy of the configuration file', 'Info', wx.OK | wx.ICON_INFORMATION) openplotter_found = False return if openplotter_serial_found == True: wx.MessageBox('Serial is running. Please stop it before restoring a copy of the configuration file', 'Info', wx.OK | wx.ICON_INFORMATION) openplotter_serial_found = False return if signalk_found == True: wx.MessageBox('SignalK is running. Please restart it after restoring a copy of the configuration file', 'Info', wx.OK | wx.ICON_INFORMATION) signalk_found = False self.source = self.m_dirPickerFileDir.GetPath() if not os.path.exists(self.source): wx.MessageBox('The source directory ' + self.source + ' does not exist.', 'Info', wx.OK | wx.ICON_ERROR) return self.errorMsg = '' if self.m_checkBoxAll.IsChecked(): self.restoreOCPNConf() if self.m_bOpenPlotter: self.restoreOPConf() self.restoreSKConf() self.restoreKPlexConf() self.restoreOCPNData() self.restorePluginData() else: if self.m_checkBoxOCPNConfig.IsChecked(): self.restoreOCPNConf() if self.m_checkBoxOpenPlotterConfig.IsChecked(): self.restoreOPConf() self.restoreSKConf() self.restoreKPlexConf() if self.m_checkBoxOCPNData.IsChecked(): self.restoreOCPNData() if self.m_checkBoxPluginData.IsChecked(): self.restorePluginData() if len(self.errorMsg) != 0: wx.MessageBox('Errors restoring\n' + self.errorMsg, 'Info', wx.OK | wx.ICON_INFORMATION) self.m_staticTextMessage.SetLabel('Restore process completed with errors') else: self.m_staticTextMessage.SetLabel('Restore process completed') SaveRestoreFilesDef.OnRestoreClick(self, event)
def start(self, join_ring=True, no_wait=False, verbose=False, update_pid=True,
          wait_other_notice=None, replace_token=None, replace_address=None,
          jvm_args=None, wait_for_binary_proto=None, profile_options=None,
          use_jna=False, quiet_start=False):
    """
    Start the node. Options include:
      - join_ring: if false, start the node with -Dcassandra.join_ring=False
      - no_wait: by default, this method returns when the node is started
        and listening to clients. If no_wait=True, the method returns sooner.
      - wait_other_notice: if True, this method returns only when all other
        live nodes of the cluster have marked this node UP.
      - replace_token: start the node with the -Dcassandra.replace_token option.
      - replace_address: start the node with the -Dcassandra.replace_address option.

    Extra command line options may be passed using the SCYLLA_EXT_OPTS
    environment variable.

    Extra environment variables for running scylla can be passed using the
    SCYLLA_EXT_ENV environment variable. Those are represented in a single
    string comprised of one or more pairs of "var=value" separated by either
    space or semicolon (';').
    """
    if wait_for_binary_proto is None:
        wait_for_binary_proto = self.cluster.force_wait_for_cluster_start and not no_wait
    if wait_other_notice is None:
        wait_other_notice = self.cluster.force_wait_for_cluster_start and not no_wait
    if jvm_args is None:
        jvm_args = []

    scylla_cassandra_mapping = {
        '-Dcassandra.replace_address_first_boot': '--replace-address-first-boot'
    }
    # Replace args in the form
    # ['-Dcassandra.foo=bar'] to ['-Dcassandra.foo', 'bar']
    translated_args = []
    new_jvm_args = []
    for jvm_arg in jvm_args:
        if '=' in jvm_arg:
            split_option = jvm_arg.split("=")
            e_msg = ("Option %s not in the form '-Dcassandra.foo=bar'. "
                     "Please check your test" % jvm_arg)
            assert len(split_option) == 2, e_msg
            option, value = split_option
            # If we have information on how to translate the jvm option, translate it
            if option in scylla_cassandra_mapping:
                translated_args += [scylla_cassandra_mapping[option], value]
            # Otherwise, just pass it as is
            else:
                new_jvm_args.append(jvm_arg)
        else:
            new_jvm_args.append(jvm_arg)
    jvm_args = new_jvm_args

    if self.is_running():
        raise NodeError("%s is already running" % self.name)

    for itf in list(self.network_interfaces.values()):
        if itf is not None and replace_address is None:
            try:
                common.check_socket_available(itf)
            except Exception as msg:
                print("{}. Looking for offending processes...".format(msg))
                for proc in psutil.process_iter():
                    if any(self.cluster.ipprefix in cmd for cmd in proc.cmdline()):
                        print("name={} pid={} cmdline={}".format(
                            proc.name(), proc.pid, proc.cmdline()))
                raise msg

    marks = []
    if wait_other_notice:
        marks = [(node, node.mark_log()) for node in
                 list(self.cluster.nodes.values()) if node.is_live()]

    self.mark = self.mark_log()

    launch_bin = common.join_bin(self.get_path(), 'bin', 'scylla')
    options_file = os.path.join(self.get_path(), 'conf', 'scylla.yaml')

    # TODO: we do not support forcing specific settings
    # TODO: workaround for api-address as we do not load it
    # from config file scylla#59
    conf_file = os.path.join(self.get_conf_dir(), common.SCYLLA_CONF)
    with open(conf_file, 'r') as f:
        data = yaml.safe_load(f)
    jvm_args = jvm_args + ['--api-address', data['api_address']]
    jvm_args = jvm_args + ['--collectd-hostname',
                           '%s.%s' % (socket.gethostname(), self.name)]

    # Let's add jvm_args and the translated args
    args = [launch_bin, '--options-file', options_file,
            '--log-to-stdout', '1'] + jvm_args + translated_args

    # Let's search for default overrides in SCYLLA_EXT_OPTS
    scylla_ext_opts = os.getenv('SCYLLA_EXT_OPTS', "").split()
    opts_i = 0
    orig_args = list(args)
    while opts_i < len(scylla_ext_opts):
        if scylla_ext_opts[opts_i].startswith("--scylla-manager="):
            opts_i += 1
        elif scylla_ext_opts[opts_i].startswith('-'):
            add = False
            if scylla_ext_opts[opts_i] not in orig_args:
                add = True
                args.append(scylla_ext_opts[opts_i])
            opts_i += 1
            while opts_i < len(scylla_ext_opts) and not scylla_ext_opts[opts_i].startswith('-'):
                if add:
                    args.append(scylla_ext_opts[opts_i])
                opts_i += 1

    if '--developer-mode' not in args:
        args += ['--developer-mode', 'true']
    if '--smp' not in args:
        # If --smp is not passed from cmdline, use default (--smp 1)
        args += ['--smp', str(self._smp)]
    elif self._smp_set_during_test:
        # If node.set_smp() is called during the test, ignore the --smp
        # passed from the cmdline.
        args[args.index('--smp') + 1] = str(self._smp)
    else:
        # Update self._smp based on command line parameter.
        # It may be used below, along with self._mem_mb_per_cpu, for calculating --memory
        self._smp = int(args[args.index('--smp') + 1])
    if '--memory' not in args:
        # If --memory is not passed from cmdline, use default (512M per cpu)
        args += ['--memory', '{}M'.format(self._mem_mb_per_cpu * self._smp)]
    elif self._mem_set_during_test:
        # If node.set_mem_mb_per_cpu() is called during the test, ignore the --memory
        # passed from the cmdline.
        args[args.index('--memory') + 1] = '{}M'.format(self._mem_mb_per_cpu * self._smp)
    if '--default-log-level' not in args:
        args += ['--default-log-level', self.__global_log_level]
    if self.scylla_mode() == 'debug' and '--blocked-reactor-notify-ms' not in args:
        args += ['--blocked-reactor-notify-ms', '5000']
    # TODO add support for classes_log_level
    if '--collectd' not in args:
        args += ['--collectd', '0']
    if '--cpuset' not in args:
        args += ['--overprovisioned']
    if '--prometheus-address' not in args:
        args += ['--prometheus-address', data['api_address']]
    if replace_address:
        args += ['--replace-address', replace_address]
    args += ['--unsafe-bypass-fsync', '1']

    ext_env = {}
    scylla_ext_env = os.getenv('SCYLLA_EXT_ENV', "").strip()
    if scylla_ext_env:
        scylla_ext_env = re.split(r'[; ]', scylla_ext_env)
        for s in scylla_ext_env:
            try:
                [k, v] = s.split('=', 1)
            except ValueError as e:
                print("Bad SCYLLA_EXT_ENV variable: {}: {}".format(s, e))
            else:
                ext_env[k] = v

    scylla_process = self._start_scylla(args, marks, update_pid,
                                        wait_other_notice,
                                        wait_for_binary_proto, ext_env)
    self._start_jmx(data)

    ip_addr = data['listen_address']
    jmx_port = int(self.jmx_port)
    if not self._wait_java_up(ip_addr, jmx_port):
        e_msg = "Error starting node {}: unable to connect to scylla-jmx port {}:{}".format(
            self.name, ip_addr, jmx_port)
        raise NodeError(e_msg, scylla_process)

    self.is_running()
    if self.scylla_manager and self.scylla_manager.is_agent_available:
        self._start_scylla_manager_agent()
    return scylla_process
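# A minimal, standalone sketch of the SCYLLA_EXT_ENV parsing rule described in the
# docstring above (pairs of "var=value" separated by spaces or ';'). The helper name
# parse_ext_env is hypothetical and only illustrates the format; it is not part of ccm.
import re

def parse_ext_env(raw):
    env = {}
    for pair in re.split(r'[; ]+', raw.strip()):
        if not pair:
            continue
        key, _, value = pair.partition('=')
        if key and value:
            env[key] = value
    return env

# Example:
# parse_ext_env("SCYLLA_HOME=/opt/scylla;SEASTAR_IO=io.yaml")
# -> {'SCYLLA_HOME': '/opt/scylla', 'SEASTAR_IO': 'io.yaml'}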
import sys
import signal
import psutil
import configparser
import odoorpc
import pyodbc
import os
from datetime import datetime

# only continue if quickbooks is running
print("\nStarting Odoo Sync")
if "QBW32.EXE" not in (p.name() for p in psutil.process_iter()):
    sys.exit("Can't continue without QuickBooks running")

# get and read config file
dir_name = os.path.dirname(__file__)
conf_file_name = os.path.join(dir_name, 'qodbc_push_odoo.ini')
parser = configparser.RawConfigParser()
parser.read(conf_file_name)

# get odoo config values
host = parser['ODOO']['host']
port = parser['ODOO']['port']
protocol = parser['ODOO']['protocol']
db = parser['ODOO']['db']
username = parser['ODOO']['username']
password = parser['ODOO']['password']

# get quickbooks config values
dsn = parser['QB']['dsn']
sync_time = parser['QB']['sync_time']
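# On Windows the reported executable name may differ in case from "QBW32.EXE".
# A case-insensitive variant of the check above, sketched as an assumption about
# the intended behaviour rather than a change to the original script:
import psutil

def quickbooks_running(target="qbw32.exe"):
    for p in psutil.process_iter(attrs=['name']):
        if (p.info['name'] or '').lower() == target:
            return True
    return False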
def on_created(self, event):
    print("\nFile created: '{}'.".format(event.src_path))

    # Check if sim is running (https://stackoverflow.com/a/7788702)
    if 'FlightSimulator.exe' not in (p.name() for p in psutil.process_iter()):
        print('Warning: The simulator is not running. Not going to add GPS data to this file.')
        return

    # Connect to sim
    print('Getting data from sim..')
    sm = SimConnect()
    aq = AircraftRequests(sm)

    # Get data from sim
    data = {
        'GPSLatitude': round(aq.get("GPS_POSITION_LAT"), 5),   # degrees
        'GPSLongitude': round(aq.get("GPS_POSITION_LON"), 5),  # degrees
        'GPSAltitude': round(aq.get('GPS_POSITION_ALT'), 2),   # meter
        'GPSSpeed': aq.get('GPS_GROUND_SPEED')                 # m/s
    }

    # Disconnect from sim
    sm.exit()

    # Check if player is not in flight
    if round(data['GPSLatitude'], 2) < 0.1 and round(data['GPSLongitude'], 2) < 0.1 and data['GPSSpeed'] < 0.1:
        print('Warning: It looks like the player is in a menu. Not going to add GPS data to this file.')
        return

    # Set additional tags (https://exiftool.org/TagNames/GPS.html)
    data['GPSLatitudeRef'] = 'North'
    if data['GPSLatitude'] < 0:
        data['GPSLatitudeRef'] = 'South'
    data['GPSLongitudeRef'] = 'East'
    if data['GPSLongitude'] < 0:
        data['GPSLongitudeRef'] = 'West'
    data['GPSAltitudeRef'] = 'Above Sea Level'
    data['GPSSpeed'] = round(data['GPSSpeed'] * 3.6, 2)  # Convert m/s to km/h
    data['GPSSpeedRef'] = 'km/h'  # Set unit to km/h

    # Compile exiftool command
    cmdline = [exiftool]
    for key, value in data.items():
        if value == -999999:
            print('Warning: invalid value for {}: {}.'.format(key, value))
            continue
        cmdline.append('-{}={}'.format(key, value))
    if OVERWRITE:
        cmdline.append('-overwrite_original')
    if DEBUG:
        cmdline.append('-verbose')
    cmdline.append(event.src_path)

    if DEBUG:
        import json
        print('data:', json.dumps(data, indent=4))
        print(cmdline)

    # Add GPS data to screenshot
    print('Adding EXIF data to image..')
    process = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    process.communicate(input=b'\n')  # Pass exiftool's '-- press ENTER --'

    print('Finished. Watching for changes..')
    self.process(event)
from subprocess import check_output
import psutil

# Iterate over all running processes
for proc in psutil.process_iter():
    try:
        # Get process name & pid from the process object.
        processName = proc.name()
        processID = proc.pid
        if "EscapeFromTarkov" in processName or 'calc' in processName:
            print(processName, ' ::: ', processID)
    except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
        print("Except:", proc)
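# On psutil >= 5.3, process_iter() can prefetch attributes, which avoids one name()
# call per process and handles NoSuchProcess/AccessDenied internally. A sketch of the
# same filter using that form (an alternative, not a change to the snippet above):
import psutil

for proc in psutil.process_iter(attrs=['pid', 'name']):
    name = proc.info['name'] or ''
    if "EscapeFromTarkov" in name or 'calc' in name:
        print(name, ' ::: ', proc.info['pid'])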
def reap_process_group(pgid, log, sig=signal.SIGTERM,
                       timeout=DEFAULT_TIME_TO_WAIT_AFTER_SIGTERM):
    """
    Tries really hard to terminate all processes in the group (including grandchildren).
    Will send sig (SIGTERM) to the process group of pid. If any process is alive after
    timeout, a SIGKILL will be sent.

    :param log: log handler
    :param pgid: process group id to kill
    :param sig: signal type
    :param timeout: how much time a process has to terminate
    """
    returncodes = {}

    def on_terminate(p):
        log.info("Process %s (%s) terminated with exit code %s", p, p.pid, p.returncode)
        returncodes[p.pid] = p.returncode

    def signal_procs(sig):
        try:
            os.killpg(pgid, sig)
        except OSError as err:
            # If an "operation not permitted" error is thrown due to run_as_user,
            # use sudo -n (--non-interactive) to kill the process
            if err.errno == errno.EPERM:
                subprocess.check_call(
                    ["sudo", "-n", "kill", "-" + str(int(sig))]
                    + [str(p.pid) for p in children])
            else:
                raise

    if pgid == os.getpgid(0):
        raise RuntimeError("I refuse to kill myself")

    try:
        parent = psutil.Process(pgid)
        children = parent.children(recursive=True)
        children.append(parent)
    except psutil.NoSuchProcess:
        # The process already exited, but maybe its children haven't.
        children = []
        for p in psutil.process_iter():
            try:
                if os.getpgid(p.pid) == pgid and p.pid != 0:
                    children.append(p)
            except OSError:
                pass

    log.info("Sending %s to GPID %s", sig, pgid)
    try:
        signal_procs(sig)
    except OSError as err:
        # No such process, which means there is no such process group - our job is done
        if err.errno == errno.ESRCH:
            return returncodes

    gone, alive = psutil.wait_procs(children, timeout=timeout, callback=on_terminate)

    if alive:
        for p in alive:
            log.warning("process %s did not respond to SIGTERM. Trying SIGKILL", p)

        try:
            signal_procs(signal.SIGKILL)
        except OSError as err:
            if err.errno != errno.ESRCH:
                raise

        _, alive = psutil.wait_procs(alive, timeout=timeout, callback=on_terminate)
        if alive:
            for p in alive:
                log.error("Process %s (%s) could not be killed. Giving up.", p, p.pid)
    return returncodes
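# A minimal usage sketch for reap_process_group() above (POSIX only): terminate a
# process group that was started with start_new_session=True. The child command and
# logger name are illustrative, not taken from the original code.
import logging
import os
import signal
import subprocess

log = logging.getLogger("reaper")
child = subprocess.Popen(["sleep", "3600"], start_new_session=True)
pgid = os.getpgid(child.pid)  # child is its own group leader
returncodes = reap_process_group(pgid, log, sig=signal.SIGTERM, timeout=10)
print(returncodes)  # {pid: returncode, ...} for processes observed exiting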
def run(self):
    """ Overwrite GenericSensor.run() method.
        This method will be called when the sensor is started. """
    while 1:
        ######################
        # perform collection #
        ######################
        process_dict = {}
        # loop through processes
        for process in psutil.process_iter():
            try:
                # Don't store processes without cmdlines
                if not process.cmdline:
                    continue

                #######################
                # collect information #
                #######################
                name = "ns:{0}:{1}".format(process.pid, process.name)
                cmdline = ':'.join(process.cmdline)

                # general information
                proc_object = {
                    'object-type': 'process',
                    'object-name': name,
                    'process-id': process.pid,
                    'process-type': process.name,
                    'user': 'ns:' + str(process.username),
                    'command-line': cmdline,
                    'num-threads': process.get_num_threads(),
                    'start-time': datetime.datetime.fromtimestamp(process.create_time),
                }

                # Memory & CPU utilization
                proc_object['memory-load'] = process.get_memory_percent()
                # TODO: use non-blocking call for now, but the % will derive from
                # CPU times elapsed since the last call
                proc_object['cpu-load'] = process.get_cpu_percent(interval=0)

                # TODO: Socket connections
                # conns = process.get_connections()
                # if conns:
                #     print conns
                #     proc['local-connection'], proc['remote-connection'] = getConnLists(conns)

                # Extra information
                try:
                    if process.terminal:
                        proc_object['terminal'] = process.terminal
                    proc_object['exe'] = process.exe
                    proc_object['cwd'] = process.getcwd()
                    io_counter = process.get_io_counters()
                    proc_object['read-count'] = io_counter.read_count
                    proc_object['write-count'] = io_counter.write_count
                    proc_object['read-bytes'] = io_counter.read_bytes
                    proc_object['write-bytes'] = io_counter.write_bytes
                except psutil.AccessDenied:
                    pass

                # TODO: get file object
                # try:
                #     print process.get_open_files()
                #     # and try and see if any of the cmd is a file
                # except psutil.AccessDenied:
                #     pass
            except psutil.NoSuchProcess:
                continue

            ############################
            # added to key-value store #
            ############################
            # add a process object into a class dict variable
            process_dict[proc_object['object-name']] = proc_object

        #####################
        # update and delete #
        #####################
        # call super's function to perform updating and deleting
        self.updates_and_deletes(process_dict)

        #######################
        # sleep for some time #
        #######################
        # note it will take 0.07 second to get info, and 0.23 to update in the db
        time.sleep(REFRESH_RATE)
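# Note: the sensor above targets the pre-2.0 psutil API (process.name, process.cmdline
# and process.create_time as attributes, get_*() accessor methods). A rough mapping to
# the current psutil API is sketched below; it is an illustration, not part of the
# original sensor.
import datetime
import psutil

def collect_one(process):
    # Equivalents of the calls used above, for psutil >= 2.0
    return {
        'process-type': process.name(),                 # was process.name
        'command-line': ':'.join(process.cmdline()),    # was process.cmdline
        'num-threads': process.num_threads(),           # was get_num_threads()
        'start-time': datetime.datetime.fromtimestamp(process.create_time()),
        'memory-load': process.memory_percent(),        # was get_memory_percent()
        'cpu-load': process.cpu_percent(interval=0),    # was get_cpu_percent()
        'cwd': process.cwd(),                           # was getcwd()
    }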
def close():
    for proc in psutil.process_iter():
        if proc.name() == "display":
            proc.kill()
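# name() and kill() can both raise while iterating if a process disappears or is not
# accessible. A slightly more defensive variant of the same idea; close_safely is a
# hypothetical name and the intended behaviour is assumed to match close() above.
import psutil

def close_safely():
    for proc in psutil.process_iter(attrs=['name']):
        if proc.info['name'] == "display":
            try:
                proc.kill()
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                pass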
login, password = getCredentials()
launchMinecraft()
login_successfull = False
times_launched = 0
while not login_successfull:
    try:
        login_successfull = loginIE(login, password)
        sleep(0.5)
        times_launched += 1
        if times_launched > 1200:
            return False
    except:
        print("something went completely wrong...")
        # launchMinecraft()
return True

procs = [p for p in psutil.process_iter()
         if 'MinePS.exe' in p.name() or 'MinePR.exe' in p.name()]
# and __file__ in p.cmdline()]
if len(procs) > 2:
    print('Process is already running...')
    launchMinecraft()
    sys.exit(1)
launchMine("PR")
def _all_processes(self) -> List[psutil.Process]:
    return sorted(psutil.process_iter(attrs=['pid', 'cmdline']),
                  key=lambda p: p.create_time(),
                  reverse=True)
def runcommand_process(self) -> Optional[psutil.Process]:
    return next(
        filter(lambda p: 'runcommand.sh' in ' '.join(p.info['cmdline']),
               self._all_processes()),
        None)
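# With process_iter(attrs=...), attributes that could not be read (AccessDenied or a
# zombie process) are filled with the ad_value default, None, so p.info['cmdline'] may
# be None and ' '.join(...) would raise TypeError. A guarded predicate sketch; the
# helper name is hypothetical and not part of the original class.
def _has_runcommand(p):
    cmdline = p.info.get('cmdline') or []
    return 'runcommand.sh' in ' '.join(cmdline)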
# print(read_distance_file)
# print(NonFRBuild_file)
# print(TwoLayerBuild_file)
# print(ThreeLayerBuild_file)
# print(read_distance_path)

# Save the start time to record the length of the full study
full_study_start_time = time.time()

# Time Read Distance Scene
read_distance_start_time = time.time()
# Open read distance scene and stay until close
call(read_distance_filepath)
while read_distance_file in (p.name() for p in psutil.process_iter()):
    # stay in this loop until the exe has been closed
    continue
read_distance_time = time.time() - read_distance_start_time
read_distance_time = round(read_distance_time, 2)

# Time Scene One
# Time Scene Two
# Time Scene Three
path_list = []
path_list.append(NonFRBuild_filepath)
path_list.append(TwoLayerBuild_filepath)
path_list.append(ThreeLayerBuild_filepath)

file_list = []
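# The wait loop above spins at full speed while the scene is open. A sketch of the same
# wait with a short sleep between polls; the helper name and poll interval are assumed,
# only the exe-name check comes from the snippet above.
import time
import psutil

def wait_until_closed(exe_name, poll_seconds=1.0):
    while any(p.info['name'] == exe_name for p in psutil.process_iter(attrs=['name'])):
        time.sleep(poll_seconds)

# e.g. wait_until_closed(read_distance_file)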
def foo():
    # snap_counter and update_counter are assumed to be initialised at module level
    global snap_counter
    global update_counter

    # print "\n******************************************"
    print("\033[1;37;40m \033[4mFunction\033[0m \033[0;37;40m \033[1;37;40m \033[4mStatus\033[0m \033[0;37;40m \n")
    # print("\033[2;37;40m Function \033[0;37;40m" + "\033[2;37;40m Status \033[0;37;40m\n")
    # print("\033[1;37;40m Bright Colour\033[0;37;40m \n")
    # print("\033[1;32;40m Bright Green \n")
    time_ = " " + time.ctime()
    print(time_)

    for process in psutil.process_iter():
        if process.cmdline() == ['python', '*watch_for_changes.py']:
            sys.exit('Process found: exiting.')

    p = subprocess.Popen("sudo systemctl status snapd.service",
                         stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()
    if 'active (running)' in output:
        print '\n\033[1;33;40m SNAP \033[0m is \033[1;32;40m running \033[0m'
        snap_counter = 0
    else:
        print 'SNAP is NOT running'
        os.system("echo 'SNAP is NOT running'")
        os.system("sudo systemctl start snapd.service")
        snap_counter = snap_counter + 1
        if int(snap_counter) == 3:
            print "SNAP is not running for 10 minutes"
            os.system("python /home/tein/active_monitoring/agent/agent_report.py agent_report snapd service -{0} -{1}"
                      .format(FROM_BOX_SITE, time_))
            snap_counter = 0

    p = subprocess.Popen("service zookeeper status",
                         stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()
    if 'active (running)' in output:
        print '\n\033[1;33;40m ZooKeeper \033[0m is \033[1;32;40m running \033[0m'
    else:
        print 'Zookeeper is NOT running'
        os.system("echo 'Zookeeper is NOT running'")
        os.system("sudo systemctl start zookeeper")

    p = subprocess.Popen("sudo systemctl status control-plane-tracing.service",
                         stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()
    if 'active (running)' in output:
        print '\n\033[1;33;40m control-plane-tracing \033[0m is \033[1;32;40m running \033[0m'
    else:
        print 'control-plane-tracing is NOT running'
        os.system("echo 'control-plane-tracing is NOT running'")
        os.system("sudo systemctl start control-plane-tracing.service")

    p = subprocess.Popen("sudo systemctl status data-plane-tracing.service",
                         stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()
    if 'active (running)' in output:
        print '\n\033[1;33;40m data-plane-tracing \033[0m is \033[1;32;40m running \033[0m'
    else:
        print 'data-plane-tracing is NOT running'
        os.system("echo 'data-plane-tracing is NOT running'")
        os.system("sudo systemctl start data-plane-tracing.service")

    p = subprocess.Popen("sudo systemctl status management-plane-tracing.service",
                         stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()
    if 'active (running)' in output:
        print '\n\033[1;33;40m management-plane-tracing \033[0m is \033[1;32;40m running \033[0m'
    else:
        print 'management-plane-tracing is NOT running'
        os.system("echo 'management-plane-tracing is NOT running'")
        os.system("sudo systemctl start management-plane-tracing.service")

    for process in psutil.process_iter():
        if process.cmdline() == ['python', '*watch_for_changes.py']:
            sys.exit('Process found: exiting.')

    p = subprocess.Popen("sudo systemctl status perfsonar-toolkit-config-daemon.service",
                         stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()
    if 'active (exited)' in output:
        print '\n\033[1;33;40m perfsonar-toolkit \033[0m is \033[1;32;40m running \033[0m'
    else:
        print 'perfsonar-toolkit is NOT running'
        os.system("echo 'perfsonar-toolkit is NOT running'")
        # os.system("sudo systemctl start perfsonar-toolkit-config-daemon.service")

    print "First way"
    p = subprocess.Popen("pgrep -af python | grep watch_for_changes.py",
                         stdout=subprocess.PIPE, shell=True)
    # p = subprocess.Popen("ps -aux | grep watch_for_changes.py | grep python", stdout=subprocess.PIPE, shell=True)
    (output, err) = p.communicate()
    p_status = p.wait()
    ## print output
    ## print output.split('\n', 1)[0]
    find = re.search(r'\b(watch_for_changes.py)\b', output)
    ## print (find.start())
    line = output.split('\n', 1)[0]
    word = line.split(' ')
    ## print word
    length = len(word)
    ## print length
    # string = output.split(" ")
    # index = string.index('watch_for_changes.py')
    # print index

    ## print "Second way"
    ## pipe1 = subprocess.Popen("ps -aux | grep watch_for_changes.py | grep python", shell=True, stdout=subprocess.PIPE).stdout
    ## count1 = pipe1.read()
    ## print count1.splitlines()[0]
    ## print "third way"
    ## print output.splitlines()[0]
    ## print 'fourth way'
    ## print output
    # count = subprocess.check_output("ps -aux | grep -c watch_for_changes.py")
    # cmd = ['ps', '-aux', '|', 'grep', '-c', 'watch_for_changes.py']
    # count = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]

    pipe = subprocess.Popen("ps -aux | grep -c watch_for_changes.py",
                            shell=True, stdout=subprocess.PIPE).stdout
    count = pipe.read()
    ## print count
    # if int(count) < 3:
    #     print ("NOT Running")
    # elif int(count) > 2:
    #     print ("Running")

    if ("watch_for_changes.py" in output) and (int(count) > 2) and (length == 3):
        print '\n\033[1;33;40m local Update \033[0m is \033[1;32;40m running \033[0m'
    else:
        print '\n\033[1;33;40m local update watch \033[0m is \033[1;31;40m NOT running \033[0m'
        update_counter = update_counter + 1
        ## print update_counter
        if int(update_counter) == 3:
            print "\n\033[0;37;44m Update local Watch is not running for 10 minutes \033[0;m"
            os.system("python /home/tein/active_monitoring/agent/agent_report.py agent_report update_watch_service {0}"
                      .format(FROM_BOX_SITE))
            update_counter = 0
            os.system("python /home/tein/update/watch_for_changes.py &")

    # setup toolbar
    toolbar_width = 40
    sys.stdout.write("\n")
    sys.stdout.write("[%s]" % (" " * toolbar_width))
    sys.stdout.flush()
    sys.stdout.write("\b" * (toolbar_width + 1))  # return to start of line, after '['
    for i in xrange(toolbar_width):
        time.sleep(1.15)  # do real work here
        # update the bar
        sys.stdout.write("-")
        sys.stdout.flush()
    sys.stdout.write("\n")


threading.Timer(0, foo).start()