Beispiel #1
0
def main():
    """Demo driver exercising os.fork, multiprocessing.Process/Pool and subprocess.

    Relies on module-level helpers ``run_proc`` and ``long_time_task`` and on
    ``os``, ``Process``, ``Pool`` and ``subprocess`` being imported elsewhere
    in this file.  The fork section only works on Unix-like systems.
    """
    print('Process (%s) start...' % os.getpid())
    # Only works on Unix/Linux/Mac: fork() returns 0 in the child and the
    # child's pid in the parent.
    pid = os.fork()
    if pid == 0:
        print('I am child process (%s) and my parent is %s.' % (os.getpid(), os.getppid()))
    else:
        print('I (%s) just created a child process (%s).' % (os.getpid(), pid))

    # Cross-platform child process via multiprocessing.Process.
    print('Parent process %s.' % os.getpid())
    p = Process(target=run_proc, args=('test',))
    print('Child process will start.')
    p.start()
    p.join()  # wait for the child before continuing
    print('Child process end.')

    # BUG FIX: this was a Python 2 print statement ("print '...'"), which is a
    # SyntaxError in Python 3 -- the dialect the rest of this function uses.
    print('================================')
    print('Parent process %s.' % os.getpid())
    p = Pool(4)  # at most 4 worker processes run concurrently
    for i in range(5):
        p.apply_async(long_time_task, args=(i,))
    print('Waiting for all subprocesses done...')
    p.close()  # no new tasks may be submitted after close()
    p.join()   # wait for every pool worker to finish
    print('All subprocesses done.')

    # Run an external command and collect only its exit status.
    print('$ nslookup www.python.org')
    r = subprocess.call(['nslookup', 'www.python.org'])
    print('Exit code:', r)

    # Drive an interactive child process through its stdin/stdout pipes.
    print('$ nslookup')
    p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, err = p.communicate(b'set q=mx\npython.org\nexit\n')
    print(output.decode('utf-8'))
    print('Exit code:', p.returncode)
# Guarded entry point: fan 5 tasks out over a pool of 4 worker processes, so
# the 5th task waits until one of the first 4 finishes.  Relies on `os`,
# `Pool` and `long_time_task` being defined/imported elsewhere in this file.
if __name__=='__main__':
    print('Parent process %s.' % os.getpid())
    p = Pool(4)  # at most 4 workers run concurrently
    for i in range(5):
        p.apply_async(long_time_task, args=(i,))
    print('Waiting for all subprocesses done...')
    p.close()  # after close() no new tasks may be submitted
    p.join()   # waits for all workers; must be preceded by close()
    print('All subprocesses done.')
"""对Pool对象调用join()方法会等待所有子进程执行完毕,调用join()之前必须先调用close(),调用close()之后就不能继续添加新的Process了。
请注意输出的结果,task 0,1,2,3是立刻执行的,而task 4要等待前面某个task完成后才执行,这是因为Pool的默认大小在我的电脑上是4,因此,最多同时执行4个进程。这是Pool有意设计的限制,并不是操作系统的限制。如果改成:
p = Pool(5)
就可以同时跑5个进程。
由于Pool的默认大小是CPU的核数,如果你不幸拥有8核CPU,你要提交至少9个子进程才能看到上面的等待效果。"""

# Child processes: often the child is an external program, not this script.
# The bare triple-quoted strings below are no-op string statements used as
# commentary (Chinese): "subprocess lets us start a child process and control
# its input and output; the example runs `nslookup www.python.org` exactly as
# the command line would".
"""很多时候,子进程并不是自身,而是一个外部进程。我们创建了子进程后,还需要控制子进程的输入和输出。
subprocess模块可以让我们启动一个子进程,然后控制其输入和输出
下面的例子演示了如何在Python代码中运行命令nslookup www.python.org,这和命令行直接运行的效果是一样的:"""
import subprocess
# Run the command and capture only its exit status.
print('$ nslookup www.python.org')
r = subprocess.call(['nslookup','www.python.org'])
print('Exit code:',r)
# Chinese: "if the child also needs input, feed it via communicate()".
"""如果子进程还需要输入,则可以通过communicate()方法输入:"""
import subprocess
print('$ nslookup')
# Pipe all three standard streams so we can script the interactive session.
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')
print(output.decode('utf-8'))
print('Exit code:', p.returncode)
Beispiel #3
0
	默认服务器:  UnKnown
	Address:  192.168.43.1

	名称:    dualstack.python.map.fastly.net
	Addresses:  2a04:4e42:11::223
		  151.101.228.223
	Aliases:  www.python.org

	Exit code: 0

	# 如果子进程还需要输入,则可以通过communicate()方法输入:
	import subprocess

	print('$ nslookup')
	p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
	output, err = p.communicate(b'set q=mx\npython.org\nexit\n')
	print(output.decode('utf-8'))
	print('Exit code:', p.returncode)
	# 上面的代码相当于在命令行执行命令nslookup,然后手动输入:

	set q=mx
	python.org
	exit

	# 运行结果
	$ nslookup
	Server:        192.168.19.4
	Address:    192.168.19.4#53

	Non-authoritative answer:
	python.org    mail exchanger = 50 mail.python.org.
# Shell commands that concatenate per-window montage result files into one
# file per montage index.  `name`, `Popen`, `PIPE` and concat_files_0..3 are
# defined earlier in the file (outside this chunk); name[-1] is presumably a
# session/recording identifier -- TODO confirm against the earlier code.
concat_files_4 = "cat window_*_montage_4.txt > " + name[-1] + "_4.txt"
concat_files_5 = "cat window_*_montage_5.txt > " + name[-1] + "_5.txt"
concat_files_6 = "cat window_*_montage_6.txt > " + name[-1] + "_6.txt"
concat_files_7 = "cat window_*_montage_7.txt > " + name[-1] + "_7.txt"
concat_files_8 = "cat window_*_montage_8.txt > " + name[-1] + "_8.txt"
concat_files_9 = "cat window_*_montage_9.txt > " + name[-1] + "_9.txt"
concat_files_10 = "cat window_*_montage_10.txt > " + name[-1] + "_10.txt"
concat_files_11 = "cat window_*_montage_11.txt > " + name[-1] + "_11.txt"

# Seizure-detection results are concatenated separately.
concat_files_seiz = "cat window_*_seizure_results.txt > " + name[-1] + "_seiz.txt"

# Directory holding the window_* files; every child runs with this cwd.
d = "/home/pi/algorithm/UI/data"

# NOTE(review): p1's stdout is passed as stdin to every `cat ... > ...` child,
# but those shell commands never read stdin, so the pipe is unused; reusing
# p1.stdout after the first child is at best a no-op.  The [0] element of each
# communicate() result is also discarded -- communicate() serves only to wait
# for the child to finish.
p1 = Popen(["ls", "-tr"], cwd=d,stdout=PIPE)
p2 = Popen(concat_files_0 ,shell=True, cwd = d, stdin=p1.stdout,stdout=PIPE)
p2.communicate()[0]

p2 = Popen(concat_files_1 ,shell=True, cwd = d, stdin=p1.stdout,stdout=PIPE)
p2.communicate()[0]

p2 = Popen(concat_files_2 ,shell=True, cwd = d, stdin=p1.stdout,stdout=PIPE)
p2.communicate()[0]

p2 = Popen(concat_files_3 ,shell=True, cwd = d, stdin=p1.stdout,stdout=PIPE)
p2.communicate()[0]

p2 = Popen(concat_files_4 ,shell=True, cwd = d, stdin=p1.stdout,stdout=PIPE)
p2.communicate()[0]

p2 = Popen(concat_files_5 ,shell=True, cwd = d, stdin=p1.stdout,stdout=PIPE)
p2.communicate()[0]
print('fork: \nProcess (%s) start...' % os.getpid())
pid = os.fork()  # fork() creates a child process cheaply (Unix only)
if pid == 0:  # child gets 0; parent gets the child's pid; getppid() returns the parent's pid
    print('I am child process (%s) and my parent is (%s).' % (os.getpid(), os.getppid()))
    exit(0)  # the child exits here and never reaches the code below
else: print('I (%s) just created a child process (%s).' % (os.getpid(), pid))

##################################################################
## multiprocessing Process
from multiprocessing import Process # fork() cannot run on Windows; Process is cross-platform
def run_proc(name): print('Run child process %s (%s)...' % (name, os.getpid()))  # code executed by the child process
print('\nProcess: \nParent process (%s).' % os.getpid())
p = Process(target=run_proc, args=('test',))  # positional args are passed through `args`
p.start()  # start() launches the child -- even simpler than fork()
p.join()  # join() waits for the child to end before continuing; used for inter-process sync

##################################################################
## subprocess
import subprocess  # start a child process, then control its input and output
print('\nsubprocess 没有控制输入输出: \n$ nslookup www.python.org')  # no I/O control needed here
r = subprocess.call(['nslookup', 'www.python.org']); print('Exit code:', r)
print('\nsubprocess 控制输入输出: $ nslookup')
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')  # equivalent to typing: set q=mx; python.org; exit
print(output.decode('utf-8'))
print('Exit code:', p.returncode)
# NOTE(review): `sys` is not imported anywhere in this visible chunk -- confirm
# an earlier `import sys` exists, otherwise the next line raises NameError.
p = subprocess.Popen(['nslookup'], stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
p.wait()  # needed so the terminal actually waits for interactive input
p.kill()
print(p.returncode)  # if ended manually this line is not reached; after kill() a KeyboardInterrupt is raised
Beispiel #6
0
class SensorHubManager(object):
    """Keeps a set of sensor scripts running and in sync with a git repo.

    Relies on module-level helpers defined elsewhere in this file:
    ``run_script``, ``is_running``, ``valid_files``, ``file_identical`` and
    the constants ``REPO`` and ``CHECKING_SECONDS``, plus ``Process``
    (imported elsewhere).

    Review fixes applied:
    * Python 2 ``print`` statements converted to ``print()`` calls -- the
      rest of this file targets Python 3, where the old form is a
      SyntaxError.
    * ``action_required`` no longer raises ``KeyError`` when a
      changed/deleted script was never started (it previously did an
      unconditional ``del self._threads[f]``).
    """

    def __init__(self, path):
        # path: workspace directory holding the runnable scripts
        self.path = path
        self._changed_files = []    # files updated by the last sync
        self._deleted_files = []    # files removed by the last sync
        self._running_scripts = []  # script names this manager supervises
        self._lpath = None          # temp clone dir of the last git sync
        self._running = True        # monitor-loop run flag
        self._mt = None             # the monitor Process
        self._threads = {}          # script name -> child process handle

    def start_service(self, scripts_to_run):
        """Launch every script, then start the background monitor process."""
        self._running_scripts = scripts_to_run
        for s in self._running_scripts:
            self._threads[s] = run_script(os.path.join(self.path, s))
        self._mt = Process(target=self.monitor_thread)
        self._mt.start()

    def stop_service(self):
        """Stop the monitor and kill every supervised script."""
        self._running = False
        time.sleep(1)  # give the monitor loop a chance to exit on its own
        self._mt.terminate()
        # NOTE(review): multiprocessing.Process has no communicate(); this
        # only works if `Process` here is a subprocess-like class -- confirm
        # which Process is imported at the top of the file.
        o, e = self._mt.communicate()
        print('Terminate main thread: out=%s, err=%s' % (o, e))
        for t in self._threads:
            self._threads[t].kill()
            o, e = self._threads[t].communicate()
            print('Terminate %s: out=%s, err=%s' % (t, o, e))
        print('All threads are killed')
        time.sleep(1)

    def monitor_thread(self):
        """Supervision loop: restart dead scripts and sync with the repo."""
        print('Starting monitor thread')
        while self._running:
            print('checking workspace...')
            self.running_scripts_check()
            self.sync_with_server()
            self.check_running_files()
            self.action_required()
            time.sleep(CHECKING_SECONDS)
        print('Monitor thread is terminated')

    def running_scripts_check(self):
        """Restart any supervised script whose process has died."""
        dead_threads = [t for t in self._threads
                        if not is_running(self._threads[t])]
        for t in dead_threads:
            # relaunch in place (the old del-then-reassign was redundant)
            self._threads[t] = run_script(os.path.join(self.path, t))
        if dead_threads:
            print('Start dead threads: %s' % dead_threads)

    def sync_with_server(self):
        """Clone REPO into a fresh timestamped temp directory."""
        t = time.localtime()
        self._lpath = 'tempdir-%04d%02d%02d%02d%02d%02d' % (
            t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
        if os.path.exists(self._lpath):
            os.system('rm -rf %s' % self._lpath)
        os.system('git clone -q %s %s' % (REPO, self._lpath))

    def check_running_files(self):
        """Diff the fresh clone against the workspace; record changes and deletions."""
        print('running scripts: %s' % self._running_scripts)
        self._changed_files = []
        self._deleted_files = []
        if not os.path.exists(self.path):
            # first run: the whole clone becomes the workspace
            os.system('cp -r %s %s' % (self._lpath, self.path))
            self._changed_files.extend(valid_files(self.path))
            print('create new workspace at %s' % self.path)
            return
        for f in valid_files(self._lpath):
            srcf = os.path.join(self._lpath, f)
            dstf = os.path.join(self.path, f)
            if not os.path.exists(dstf) or not file_identical(srcf, dstf):
                os.system("cp %s %s" % (srcf, self.path))
                self._changed_files.append(f)
        dst_files = valid_files(self.path)
        src_files = valid_files(self._lpath)
        if len(dst_files) > len(src_files):
            # workspace has files the repo no longer does: delete them
            for f in dst_files:
                if f not in src_files:
                    os.system('rm -rf %s' % os.path.join(self.path, f))
                    self._deleted_files.append(f)
        if self._changed_files:
            print('changed files: %s' % self._changed_files)
        if self._deleted_files:
            print('deleted files: %s' % self._deleted_files)

    def action_required(self):
        """Kill scripts whose files changed or disappeared; restart changed ones."""
        for f in self._running_scripts:
            if f in self._changed_files + self._deleted_files:
                if f in self._threads and is_running(self._threads[f]):
                    self._threads[f].kill()
                    print('script %s is killed' % f)
                # BUG FIX: was an unconditional `del self._threads[f]`, which
                # raised KeyError when the script had never been started.
                self._threads.pop(f, None)
                if f in self._changed_files:
                    self._threads[f] = run_script(os.path.join(self.path, f))
                    print('script %s is started' % f)
        os.system('rm -rf %s' % self._lpath)
# Note: because the Pool was created with argument 4, task 4 must wait for one
# of tasks 0-3 to finish before it runs -- a Pool limit, not an OS limit.
# Pool's default size is the number of CPU cores.

## Child processes: often the child is an external program rather than this
## script itself; after creating it we still control its input and output.

import subprocess
print('nslookup www.python.org')
r = subprocess.call(['nslookup','www.python.org']) # spawn a child process and run one command
print('Exit code:',r)

## If the child process needs input, feed it via communicate()

import subprocess
print("$ nslookup")
p = subprocess.Popen(['nslookup'],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
output,err = p.communicate(b'set q=mx\npython.org\nexit\n')# same as running nslookup interactively and typing the commands by hand
print(output.decode('utf-8'))
print('Exit code:',p.returncode)

## Inter-process communication

from multiprocessing import Process,Queue
import os,time,random

def write(q):
    """Producer: push the letters A-D onto *q*, pausing a random moment (< 1s) after each put."""
    print('Process to write:%s' % os.getpid())
    for letter in ('A', 'B', 'C', 'D'):
        print('Put %s to queue...' % letter)
        q.put(letter)
        # random pause so a concurrent reader interleaves with us
        time.sleep(random.random())
Beispiel #8
0
print('\nProcess: \nParent process (%s).' % os.getpid())
p = Process(target=run_proc, args=('test',))  # positional args are passed through `args`
p.start()  # start() launches the child -- even simpler than fork()
p.join()  # join() waits for the child to end; used for inter-process sync
##################################################################
# Pool time
from multiprocessing import Pool  # launch many children on top of Process
# def long_time_task(name):
#     print('Run task %s (%s)...' % (name, os.getpid()))
#     start = time.time(); time.sleep(random.random() * 3)
#     end = time.time(); print('Task %s runs %0.2f seconds.' % (name, (end - start)))
# print('\nPool: \nParent process %s.' % os.getpid())
# p = Pool(4)  # pool of size 4: the first 4 tasks start at once; the 5th waits for a free worker
# for i in range(5): p.apply_async(long_time_task, args=(i,))
# p.close()  # no new Process may be added after close()
# p.join()  # join() waits for all pool children; close() must be called first
##################################################################
# subprocess
import subprocess  # start a child process, then control its input and output
print('\nsubprocess 没有控制输入输出: \n$ nslookup www.python.org')  # no I/O control needed here
r = subprocess.call(['nslookup', 'www.python.org']); print('Exit code:', r)
print('\nsubprocess 控制输入输出: $ nslookup')
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')  # equivalent to typing: set q=mx; python.org; exit
print(output.decode('utf-8'))
print('Exit code:', p.returncode)
# NOTE(review): `sys` is not imported in this visible chunk -- confirm an
# earlier `import sys` exists, otherwise the next line raises NameError.
p = subprocess.Popen(['nslookup'], stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
p.wait()  # needed so the terminal actually waits for interactive input
p.kill()
print(p.returncode)  # if ended manually this line is not reached; after kill() a KeyboardInterrupt is raised
Beispiel #9
0
# (sample output, continued)
# Address:  222.222.222.222
#
# Name:    python.map.fastly.net
# Addresses:  2a04:4e42:36::223
# 	  151.101.228.223
# Aliases:  www.python.org
#
# Exit code: 0


# If the child process also needs input, feed it via communicate():
import subprocess
print('$ nslookup')
# NOTE(review): stdout is NOT piped here, so communicate() returns None for
# the output -- the commented-out unpacking below would have bound None.
p=subprocess.Popen(['nslookup'],stdin=subprocess.PIPE,stderr=subprocess.PIPE)
# output,err=p.communicate(b'set q=mx\nbaidu.com\nexit\n')
p.communicate(b'set q=mx\npython.org\nexit\n')# without stdout=PIPE there is nothing to capture; output/err would be None
# The code above is equivalent to running nslookup and typing by hand:
# set q=mx
# python.org
# exit
print('Exit code:',p.returncode);
# $ nslookup
# Default Server:  SJZ-CA6-ZONE2
# Address:  222.222.222.222
#
# > > Server:  SJZ-CA6-ZONE2
# Address:  222.222.222.222
#
# python.org	MX preference = 50, mail exchanger = mail.python.org
# > Exit code: 0
Beispiel #10
0
    while True:  # NOTE(review): fragment -- the enclosing `def` is outside this chunk; `name` and `q` come from it
        # BUG NOTE: the format string has two %s placeholders but `% name`
        # binds only one value (the comma makes `(os.getpid())` a second print
        # argument, not a format argument) -> TypeError at runtime.
        print('Run child process %s (%s)...' % name,(os.getpid()))
        print('读取数据....')
        value = q.get(True)  # block until an item is available
        print('Get %s from queue.' % value)
'''
# Guarded entry point: launch a child that reads from a shared Queue, then
# drive nslookup through pipes.  Relies on `os`, `Queue`, `Process`,
# `run_proc` and `subprocess` from elsewhere in the file.
if __name__ == '__main__':
    print('Parent process %s.' % os.getpid())
    q = Queue()
    # NOTE(review): this first Process object is immediately overwritten by
    # the reassignment below and is never started.
    p = Process(target=run_proc, args=(
        "haha",
        q,
    ))
    #pr = Process(target=read, args=(q,))
    p = Process(target=run_proc, args=(
        '名字',
        q,
    ))
    print('Child process will start.')
    p.start()
    p.join()  # wait for the child to finish
    print('Child process end.')

    print("$ nslookup")
    p = subprocess.Popen(['nslookup'],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # feed three commands to nslookup's stdin; output decoded as GBK
    # (Chinese Windows console encoding)
    output, err = p.communicate(b'set q=mx\n10.123.65.56\nexit\n')
    print(output.decode('gbk'))
    print('Exit code:', p.returncode)
Beispiel #11
0
# (transcript: child exited with code 0)


"""
	如果需要输入!
"""
# (the no-op string above says: "if input is needed!")

print("准备启动一个子线程 ====== 2")

p = subprocess.Popen(['nslookup'], 
					 stdin=subprocess.PIPE, 
					 stdout=subprocess.PIPE, 
					 stderr=subprocess.PIPE)

out ,err = p.communicate(b"set q=mx\npython.org\nexit\n")
# Equivalent to running nslookup on the command line and typing by hand:
# set q=mx
# python.org
# exit

print(out.decode('utf-8'))
print("Exit code:", p.returncode)

# sample output:
# 准备启动一个子线程 ====== 2
# Server:		192.168.1.1
# Address:	192.168.1.1#53

# Non-authoritative answer:
# python.org	mail exchanger = 50 mail.python.org.
Beispiel #12
0
#############################
import subprocess

print('====== subprocess ======')
# look up baidu's IP address
print('$ nslookup www.baidu.com')
r = subprocess.call(['nslookup', 'www.baidu.com'])
print('Exit code:', r)

# if the child process needs input, feed it through communicate()
print('$ nslookup')
p = subprocess.Popen(['nslookup'],
                     stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\nwww.baidu.com\nexit\n')
print(output.decode('utf-8'))
print('Exit code:', p.returncode)

#############################
#  inter-process communication: Queue message queue
#############################
from multiprocessing import Process, Queue

print('====== multiprocessing.Queue ======')


def producer(q):
    # Producer: push four fruit names onto the shared queue `q`.
    # NOTE(review): the visible chunk ends at q.put -- the function body may
    # continue past this point in the full file.
    for fruit in ['apple', 'tangerine', 'orange', 'banana']:
        print('  > produce "%s"' % fruit)
        q.put(fruit)