def mrbayes():
    print("Running MrBayes...\n\n\n")
    os.chdir(home)
    os.chdir("./scripts")


    mb_exe = os.path.abspath("./mb")

    # append mb block to bottom of each nexus and run on each nexus
    mbblock = '''
    begin mrbayes;
        set autoclose=yes;
        lset nst=6 rates=gamma;
        mcmcp ngen=200000 burninfrac=.25 samplefreq=100 printfreq=1000 diagnfreq=1000 nruns=3 nchains=3 temp=0.40 swapfreq=10;
        constraint parmo = 45 46 47 48 25 26 27 28 29;
        constraint leca = 43 2;
        prset topologypr = constraints (parmo, leca);
        mcmc;
        sumt;
    end;
    '''

    os.chdir(home)
    os.chdir(input_dir)

    cmds = []

    for file in glob.glob("*.nexus"):
        # Append the MrBayes command block to the end of each NEXUS file.
        with open(file, 'a') as f:
            f.write(mbblock)

        cmds.append(mb_exe + " -i " + file)

    # Run MrBayes on all NEXUS files in parallel, one worker per command.
    p = Pool(len(cmds))
    p.map(start_proc, cmds)
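
The helper start_proc is referenced above but not defined in this snippet (the module is also assumed to import os, glob, and multiprocessing.Pool). A minimal sketch, assuming it simply shells out to the given command string:

import subprocess

def start_proc(cmd):
    # Run one MrBayes command line; blocks until that run finishes.
    return subprocess.call(cmd, shell=True)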
Example #2
 def test_multiprocess_sharing(self):
     api.get_object_by_name('tenant', name='admin')
     p = Process(target=shared_session_check, args=(1,))
     p.start()
     p.join()
     p = Pool(16)
     shared_sessions = []
     for index in range(16):
         shared_sessions.append(index)
     results = p.map(shared_session_check, shared_sessions)
     for result in results:
         assert result == 200
Example #4
import sys
from functools import partial
from io import BytesIO
from multiprocessing import Pipe, Pool, Process
from socket import AF_INET, SOCK_DGRAM, socket
from time import sleep

from bitarray import bitarray  # third-party package


def main():

    tones = ''
    with open('769.bin.ambe', 'rt') as dtmf:
        for line in dtmf:
            tones += '0' * 8 + line.rstrip() + '0' * 7

    dtmf_tones = bitarray(tones)

    data_buffer = b''
    silent = bitarray(
        '000000001111100000000001101010011001111110001100111000001')

    sys.stdout.buffer.write(b'.amb')
    sys.stdout.buffer.write(silent.tobytes() * 10 + dtmf_tones.tobytes())

    pipe = Pipe()
    p = Process(target=output_process, args=(pipe[0], dtmf_tones))
    p.start()

    sock = socket(AF_INET, SOCK_DGRAM)
    sock.bind(('127.0.0.1', 31000))

    sys.stdout.buffer.flush()

    with Pool(3) as pool:
        while True:
            data = sock.recv(9 * 6)

            if data:
                data_buffer += data
                if len(data_buffer) > 8:  # need 9 bytes for one ambe72 frame
                    io_data = BytesIO(data_buffer)
                    ambe72_frames = iter(partial(io_data.read, 9), b'')
                    ambe49_DSD_frames = pool.map(convert_to_DSD, ambe72_frames)
                    pipe[1].send_bytes(b''.join(ambe49_DSD_frames))
                    data_buffer = b''
            else:
                sleep(0.01)

    p.join()
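
The functions output_process and convert_to_DSD are not included in this excerpt. A minimal sketch of output_process that just drains converted frames from the pipe to stdout (the dtmf_tones argument's role is not visible here, so it is accepted but unused):

def output_process(conn, dtmf_tones):
    # Forward converted AMBE frames to stdout as they arrive on the pipe.
    while True:
        sys.stdout.buffer.write(conn.recv_bytes())
        sys.stdout.buffer.flush()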
Example #5
from multiprocessing import Process
import os
from multiprocessing import Pool
def info(title):
    print(title)
    print('module name:', __name__)
    print('parent process:', os.getppid())
    print('process id:', os.getpid())

def f(name):
    info('function f')
    print('hello', name)

if __name__ == '__main__':
    info('main line')
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()
def f(x):
    return x*x

if __name__ == '__main__':
    with Pool(5) as p:
        print(p.map(f, range(10)))
Example #6
                jobs = []

                for _ in range(8):
                    p = Process(target=generate_data, args=(lock, i))
                    jobs.append(p)
                    p.start()

                for job in jobs:
                    job.join()

            trainExamples = merge_data(i)
            print(len(trainExamples))
            g = Game(8)
            nnet = nn(g)
            nnet.train(trainExamples)
            nnet.save_model(filename="model_auto_" + str(i + 2))
        else:
            break

        print(i, 'one model')

if __name__ == '__main__':
    with Pool(8) as p:
        result = p.map(arena_process, range(8))

        win_1 = sum([i[0] for i in result])
        win_2 = sum([i[1] for i in result])

        print(win_1)
        print(win_2)
Example #7
# p.join()
# while True:
#     print("死循环")
#     time.sleep(1)

from multiprocessing import Pool


def f(x):
    return x * x


if __name__ == '__main__':
    with Pool(5) as p:
        print(p.map(f, [1, 2, 3]))

from multiprocessing import Process, Manager


def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()


if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        l = manager.list(range(10))

        p = Process(target=f, args=(d, l))
        p.start()
        p.join()

        print(d)
        print(l)
Example #8

from multiprocessing import Process


def f(name):
    print('hello', name)


if __name__ == '__main__':
    p = Process(target=f, args=('bob', ))
    p.start()
    p.join()

from multiprocessing import Pool

if __name__ == '__main__':
    p = Pool(5)
    print(p.map(findpi, [100, 100, 100]))

import random
import matplotlib.pyplot as plt


class P:
    def __init__(self):
        # Instance attributes, so each estimate starts from empty point lists.
        self.x_inside = []
        self.y_inside = []
        self.x_outside = []
        self.y_outside = []


def subfindpi(n):
    # Monte Carlo estimate of pi: sample n points in the unit square and
    # count how many fall inside the quarter circle of radius 1.
    p = P()
    for i in range(n):
        x, y = random.random(), random.random()
        if x * x + y * y <= 1:
            p.x_inside.append(x)
            p.y_inside.append(y)
        else:
            p.x_outside.append(x)
            p.y_outside.append(y)
    return 4 * len(p.x_inside) / n
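
findpi is used in the __main__ block above but missing from this excerpt; a minimal stand-in consistent with subfindpi (the wrapper shape is an assumption):

def findpi(n):
    # Assumed wrapper: one Monte Carlo batch of n samples per pool worker.
    return subfindpi(n)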
Example #9
	(1)
	m.list(list data)
	m.dict(dict data)
	(2)
	with Manager() as m:
		...

5) Process pools: the Pool module

	p = Pool(os.cpu_count() + 1)		# once the pool is created, only this many workers process data at a time

	p.close()
	p.join()	# close must come before join

	Methods (see the sketch below):
		p.map(func, iterable)	# asynchronous over iterable; returns the list of each func call's return value; handles close and join itself
		p.apply(func, args)	# synchronous; returns func's return value; no close/join needed
		p.apply_async(func, args, callback)	# asynchronous; returns a result object whose get() retrieves func's return value
									# get() fetches one result at a time, so results are usually collected after all tasks finish
									# func's return value is passed to callback, and the callback runs in the main process
		processes used by apply are ordinary processes
		processes used by apply_async are daemon processes
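
A short runnable sketch of map, apply, and apply_async with a callback (square and collect are illustrative names):

from multiprocessing import Pool


def square(x):
    return x * x


results = []


def collect(value):
    # The callback runs in the main process as each task finishes.
    results.append(value)


if __name__ == '__main__':
    p = Pool(4)
    print(p.map(square, range(5)))    # [0, 1, 4, 9, 16]
    print(p.apply(square, (3,)))      # 9, blocks until done
    r = p.apply_async(square, (4,), callback=collect)
    print(r.get())                    # 16
    p.close()  # close before join
    p.join()
    print(results)                    # [16]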




Things to study for threads: the threading module

GIL: Global Interpreter Lock (CPython only)
	It locks threads: only one thread may run on the CPU at a time      # (no true parallelism)
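
A small demonstration of that note: CPU-bound work gains nothing from threads under the GIL but does scale with processes (timings vary by machine):

import time
from multiprocessing import Pool
from threading import Thread


def burn(n):
    # Pure-Python CPU-bound loop; the GIL serializes it across threads.
    total = 0
    for i in range(n):
        total += i * i
    return total


if __name__ == '__main__':
    t0 = time.time()
    threads = [Thread(target=burn, args=(2_000_000,)) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print('threads:  ', time.time() - t0)

    t0 = time.time()
    with Pool(4) as p:
        p.map(burn, [2_000_000] * 4)
    print('processes:', time.time() - t0)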
Example #10
	p2.start()
	p2.join()




if __name__ == '__main__':
	mypi = compute_pi(10000000)
	print ('my pi: {0}, Error: {1}'.format(mypi, mypi - pi))




if __name__ == '__main__':
	with Pool(processes=5) as p:
		results = p.map(start_function_for_processes, range(200), chunksize=10)
	print(results)


	with Pool(4) as p:
		pis = p.map(compute_pi, [10000000] * 4)
		print(pis)
		mypi = sum(pis) / 4
		print('my pi: {0}, Error: {1}'.format(mypi, mypi - pi))
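
Neither compute_pi nor start_function_for_processes is defined in this excerpt; a minimal Leibniz-series stand-in for compute_pi (the snippet also assumes `from math import pi` for the error printout):

def compute_pi(n):
    # Leibniz series: pi = 4 * (1 - 1/3 + 1/5 - 1/7 + ...), using n terms.
    acc = 0.0
    for k in range(n):
        acc += (-1) ** k / (2 * k + 1)
    return 4 * acc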
	





Example #11
import os
from multiprocessing import Process, Pool

__author__ = 'monkov'


def info(title):
    print(title)
    print('module name:', __name__)
    if hasattr(os, 'getppid'):  # only available on Unix in Python 2
        print('parent process:', os.getppid())
    print('process id:', os.getpid())


def f(name):
    info('function f')
    print('hello', name)


def ff(x):
    return x * x


if __name__ == '__main__':
    info('main line')
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()

    p = Pool(5)
    print(p.map(ff, [1, 2, 3]))
import time
from multiprocessing import Pool

def printNumber(N):
    print("I am using CPU", N, "good")
    time.sleep(1)
    out = (str(N) + ' g_ooo_d')
    return out

printNumber(3)


if __name__ == '__main__':
    array = [1, 2, 3]
    p = Pool(4)
    result = p.map(printNumber, array)
    print(result)

# or
if __name__ == '__main__':
    with Pool(5) as p:
        print(p.map(printNumber, [1, 2, 3]))


# liao xue feng
        d.join(1)
        print('d.is_alive()', d.is_alive())
        n.join()

if False:

    cpu_utilize = cpu_count()
    p = Pool(processes=cpu_utilize)  # Define number of CPUs to use

    jobs_2_run = list(range(cpu_utilize))
    jobs_2_run.reverse()
    print(jobs_2_run)
    p.map(some_function, jobs_2_run)

#cpu_utilize = 32

#if cpu_utilize > cpu_count():
#    cpu_utilize = int(cpu_count())

#print(cpu_utilize)

#number_jobs_2_run = 10

#pool = Pool(processes=int(cpu_utilize))  # Define number of CPUs to use

#with Pool(1) as p:
#    p.map(some_function, jobs_2_run)
Example #14
    Args:
        num (int): The specific number we want to print

    Returns:
        None: Result will print to standard output directly

    """
    for _ in range(5):
        time.sleep(0.1)
        print('{}'.format(num), end='')


if __name__ == '__main__':
    # 1. First way: with process
    processes = []
    for i in range(1, 4):
        p = Process(target=print_num, args=(i, ))
        processes.append(p)
        p.start()

    # Join each process
    for process in processes:
        process.join()

    print('')

    # 2. Second way: with Pool
    with Pool(3) as p:
        p.map(print_num, [1, 2, 3])
import timeit
timeit.timeit('function()', number=iterations)

MULTIPROCESSING
===========
from multiprocessing import Process, Pool

# spawn 8 processes, then wait for all of them
procs = []
for i in range(8):
  p = Process(target=func, args=(var1, var2))
  p.start()
  procs.append(p)
for p in procs:
  p.join()

# pool with 8 processes
with Pool(8) as p:
  data = p.map(func, range(1000))
print(data)

FILE IO
==========
file = open('filename.txt')
file.read()
file.seek(0)
file.readlines() # list of lines
file.close()
with open('file.txt', mode='a+') as file:
  file.write('foo')

JSON
=====
import json
Example #16
                else:
                    r = mask2rle(predict)
                    encoded_pixels.append(r)
                image_id += 1

    print("Saving submission...")
    sub['EncodedPixels'] = encoded_pixels
    sub.to_csv('submission.csv',
               columns=['Image_Label', 'EncodedPixels'],
               index=False)
    print("Saved.")


# Train model
p = Process(target=train_model)
p.start()
p.join()

# Get valid preds and optimize threshold and min_size
class_params = {}
with multiprocessing.Pool(1) as p:
    class_params = p.map(generate_class_params, [1])[0]
    print("Got back", class_params)

# Generate test predictions
with multiprocessing.Pool(1) as p:
    result = p.map(generate_test_preds, [class_params])
    print(result)

print("done")
# The `Pool()` method distributes a data set across multiple processors. This technique can be used to implement the `mapreduce` parallel programming model.

# In[23]:

from multiprocessing import Pool


def cuad(n):
    return n * n


if __name__ == "__main__":

    datos = [1, 2, 3, 4, 5]
    p = Pool()
    resultado = p.map(cuad, datos)
    print(resultado)

# In[24]:

from multiprocessing import Pool
import time


def suma(n):
    val = 0
    for x in range(n):
        val += x * x

    return val
Example #18

# Concurrently compute the mean of each row of a 2D array
from multiprocessing import Pool
from Python_Develope.self_module.flock import f_mean as f

if __name__ == '__main__':
    x = [
        list(range(10)),
        list(range(20, 30)),
        list(range(50, 60)),
        list(range(80, 90))
    ]
    with Pool(5) as p:  # create a pool with 5 worker processes
        print(p.map(f, x))  # run concurrently

# Handling exceptions raised by Pool methods in different situations

import multiprocessing
import sys
from Python_Develope.self_module.flock import mul, plus, f_error, calculate


def test():
    # create a pool with 4 worker processes
    with multiprocessing.Pool(4) as pool:
        tasks = [(mul, (i, 7)) for i in range(10)] + [(plus, (i, 8))
                                                      for i in range(10)]
        print('Testing error handling:')
        try: