Example #1
0
def main():
    """Copy all files of a user-named folder into a '复件' duplicate using 5 processes.

    Bug fix: ``Manager()`` has no ``queue()`` attribute — the queue factory
    is ``Queue()`` (capital Q); the original raised AttributeError here.
    """
    # 0. Folder the user wants to copy.
    oldFolderName = input('请输入文件夹的名字:')

    # 1. Create the destination folder.
    newFolderName = oldFolderName + '复件'
    os.mkdir(newFolderName)

    # 2. All file names inside the source folder.
    fileNames = os.listdir(oldFolderName)

    # 3. Copy with a pool of 5 processes — one task per file, so the work is
    # spread over the workers instead of a single process copying everything.
    pool = Pool(5)
    queue = Manager().Queue()  # fixed: was Manager().queue() (AttributeError)

    for name in fileNames:
        pool.apply_async(copyFileTask, args=(name, oldFolderName, newFolderName, queue))

    # Drain one completion token per file and display the progress.
    num = 0
    allNum = len(fileNames)
    while True:
        queue.get()
        num += 1
        copyRate = num / allNum
        print('\rcopy的进度是:%.2f%%' % (copyRate * 100), end='')
        if num == allNum:
            break

    # Reap the worker processes.
    pool.close()
    pool.join()
Example #2
0
def main():
    """Copy every file of a user-named folder into a "-复件" copy with 5 worker processes."""
    # Ask which folder to duplicate.
    oldFolderName = input("请输入要拷贝的文件夹名:")
    # The duplicate lives next to the original, suffixed "-复件".
    newFolderName = oldFolderName + "-复件"
    os.mkdir(newFolderName)
    # Every entry of the source folder becomes one copy task.
    entries = os.listdir(oldFolderName)
    # A Manager queue lets the workers report completion back to us.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, oldFolderName, newFolderName, done_q))

    # Drain one token per finished file and show the running percentage.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print("\r复制文件的进度是:%.2f%%" % (finished / total * 100), end="")
        if finished == total:
            print("")
            break
    print("%s文件夹拷贝完成" % oldFolderName)
Example #3
0
def main():
    """Duplicate a folder by copying each of its files in a pooled process."""
    # 1. Ask for the source folder.
    src = input("请输入需要copy的文件夹名字:")

    # 2. Destination folder: source name plus a "[复件]" suffix.
    dst = src + "[复件]"
    os.mkdir(dst)

    # 3. One copy task per entry in the source folder.
    entries = os.listdir(src)

    # 4. Pool of 5 workers; the queue carries one token per finished file.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copyFile, (entry, src, dst, done_q))

    # Consume exactly one token per file while updating the progress line.
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print("\r\rcopy进度为:%.2f%%" % (finished / total * 100), end="")

    workers.close()
    workers.join()
    print("")
    print("copy完成!")
Example #4
0
def main():
    """Copy every file of a chosen folder into a "-副件" duplicate with 5 workers."""
    # 1. Folder to duplicate, typed by the user.
    src = input("请输入您想要复制的文件夹名字:")
    # 2. The duplicate keeps the original name plus a "-副件" suffix.
    dst = src + "-副件"
    os.mkdir(dst)

    entries = os.listdir(src)  # every file that has to be copied

    workers = Pool(5)           # worker-process pool
    done_q = Manager().Queue()  # completion tokens from the workers
    total = len(entries)
    finished = 0

    for entry in entries:
        workers.apply_async(copy, args=(entry, src, dst, done_q))

    # Progress loop: one token per copied file, refreshed once per second.
    while True:
        done_q.get()
        finished += 1
        print("\r---复制的进度是:%d%%" % (finished / total * 100), end="")
        time.sleep(1)
        if finished == total:
            break

    workers.close()
    workers.join()
Example #5
0
def main():
    """Save people-information for every URL listed in url.txt, with 10 pooled processes.

    NOTE(review): Python 2 code (`print queue.get()` statement).
    """

    pool = Pool(10)

    # Workers report their results back through this manager queue.
    queue = Manager().Queue()

    fr = open('url.txt', 'r')

    lines = fr.readlines()

    # One asynchronous task per URL line (control characters stripped first).
    for i in lines:

        url = remove_control_chars(i)

        pool.apply_async(savePeopleInformation, args=(url, queue,))

    allnum = len(lines)

    num = 0

    # Print one queued result per task; close the input file when all done.
    while True:

        print queue.get()

        num += 1

        if num >= allnum:

            fr.close()

            break
def main():
    """Copy a folder's files into a '_复件' duplicate with 10 worker processes.

    Fixes: the blank-line print ran *inside* the progress loop (after the
    break check), so every update started a new line and defeated the
    carriage-return progress display; it now runs once after the loop.
    The pool is also joined so all workers are reaped before returning.
    """
    time1 = time.time()
    # Folder to copy.
    oldFolderName = input("请输入你要复制的文件夹名称:")

    # Create the destination folder.
    newFolderName = oldFolderName + '_复件'
    os.mkdir(newFolderName)

    # Names of all files to copy.
    filesNames = os.listdir(oldFolderName)

    # Pool of 10 workers; the queue carries one token per finished file.
    pool = Pool(10)
    queue = Manager().Queue()

    for name in filesNames:
        pool.apply_async(copyFileTask,
                         args=(name, oldFolderName, newFolderName, queue))

    # Progress: consume one token per copied file.
    num = 0
    allNum = len(filesNames)
    while num < allNum:
        queue.get()
        num += 1
        copyRate = num / allNum
        print('\rcopy的进度是:%.2f%%' % (copyRate * 100), end='')
    print('')  # moved out of the loop: end the progress line exactly once

    time2 = time.time()
    print('运行时间为:%d s' % (time2 - time1))
    pool.close()
    pool.join()  # added: wait for all workers to exit
def main():
    """Copy a folder's files concurrently and report the elapsed time."""
    # 1. Source folder name from the user.
    src = input('请输入需要拷贝的文件名:')

    # 2. Create the destination folder.
    dst = src + '[复件]'
    os.mkdir(dst)

    # 3. Files to copy.
    entries = os.listdir(src)
    # 4. Worker pool and a queue for completion notifications.
    workers = Pool(5)
    done_q = Manager().Queue()
    # Copying starts here.
    started = time.time()
    # 5. Schedule one copy task per file.
    for entry in entries:
        workers.apply_async(copyfile, (entry, src, dst, done_q))

    # Progress: one queue token per finished file.
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print('\r\rcopy文件进度为:%.2f%%' % (finished / total * 100), end='')
    # Copying done — report the total wall-clock time.
    elapsed = time.time() - started
    print('\n一共copy了%d个文件,总共耗时为:%.2f' % (total, elapsed))
def main():
    """Duplicate the named folder using a pool of five copy processes."""
    # 0. Folder chosen by the user.
    src = input('请输入文件夹的名字:')

    # Destination folder name.
    dst = src + '-复件'
    os.mkdir(dst)

    # All entries that must be copied.
    entries = os.listdir(src)

    # Five workers; the queue signals each completed copy.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copyFileTask, args=(entry, src, dst, done_q))

    # Show progress until every file has reported back.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print('\rcopy的进度是:%.2f %%' % (finished / total * 100), end='')
        if finished == total:
            break
    workers.close()
    workers.join()
Example #9
0
def main():
    """Copy all files of a user-chosen folder into a "-复件" duplicate."""
    # Source folder name.
    src = input("请输入需要拷贝的文件夹名字:")
    # Destination folder.
    dst = src + "-复件"
    os.mkdir(dst)
    # Files to be copied.
    entries = os.listdir(src)

    # Five worker processes copy in parallel; the queue reports completions.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, src, dst, done_q))

    # One line of progress output per finished file (no carriage return used).
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print("拷贝的进度是%.2f%%" % (finished / total * 100))
        if finished == total:
            break
    print("已完成你需要拷贝的文件")
Example #10
0
def main():
    """Copy every file of a named folder into a "-复件" duplicate and count them."""
    # 0. Source folder.
    src = input("请输入文件夹的名字:")

    # 1. Destination folder.
    dst = src + "-复件"
    os.mkdir(dst)

    # 2. All file names inside the source folder.
    entries = os.listdir(src)

    # 3. Parallel copy with five worker processes.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, src, dst, done_q))

    # Drain one completion token per file while refreshing the progress line.
    total = len(entries)
    finished = 0
    for finished in range(1, total + 1):
        done_q.get()
        print("\rcopy的进度是:%.2f%%" % (finished / total * 100), end="")

    print("\nCopy Finished...Total num is %d" % finished)
def main():
    """Copy all files from a user-named folder into a "-copy" duplicate.

    Fixes: the progress message misspelled "processing" and lacked
    ``end=""``, so the ``\\r`` never overwrote the previous line — each
    update was printed on a new line instead of refreshing in place.
    """
    # 0. Get the old folder's name.
    oldDirName = input("Please tell me the folder's name: ")
    newDirName = oldDirName + "-copy"

    # 1. Create the destination directory.
    os.mkdir(newDirName)
    # 2. File names to copy.
    fileNames = os.listdir(oldDirName)
    # 3. Copy with a pool of 5 processes; the queue reports completions.
    pool = Pool(5)
    queue = Manager().Queue()

    for name in fileNames:
        pool.apply_async(copyFile, args=(name, oldDirName, newDirName, queue))

    # Drain one token per finished file and refresh the progress line.
    num = 0
    numAll = len(fileNames)
    while num < numAll:
        queue.get()
        num += 1
        print("\r processing... %.2f%%" % (num / numAll * 100), end="")
    print("")  # finish the progress line

    # Reap the worker processes.
    pool.close()
    pool.join()
Example #12
0
def main():
    """Back up a directory by copying its files with parallel processes."""
    # Directory the user wants backed up.
    src_dir = input("请输入需要备份的目录:")
    # Backup directory gets a "-bak" suffix.
    dst_dir = src_dir + "-bak"
    os.mkdir(dst_dir)
    # Entries that need copying.
    entries = os.listdir(src_dir)
    # Completion reports arrive on the queue; five workers do the copying.
    done_q = Manager().Queue()
    workers = Pool(5)
    for entry in entries:
        workers.apply_async(copy_file_content,
                            args=(entry, src_dir, dst_dir, done_q))

    # Progress display driven by one token per finished file.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print("\r当前备份进度为:%.2f%%" % (finished / total * 100), end="")
        if finished == total:
            break

    workers.close()
    workers.join()
def __main__():
    """Copy the files of a chosen folder into a "-复件" duplicate."""
    # 1. Source folder.
    src = input("请输入要复制的文件夹名字:")

    # 2. Destination folder.
    dst = src + "-复件"
    os.mkdir(dst)

    # 3. File names inside the source folder.
    entries = os.listdir(src)

    # 4. Five copy workers; the queue signals each finished file.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copy_file,
                            args=(entry, src, dst, done_q))

    # Progress display (integer percentage).
    finished = 0
    total = len(entries)
    print("-" * 50)
    while True:
        done_q.get()
        finished += 1
        print("\r当前copy的进程是:%d %%" % (finished / total * 100), end="")
        if finished == total:
            print("")
            break
Example #14
0
def main():
    """Copy every file of a user-chosen folder into a "-[复件]" duplicate."""
    # Source folder typed by the user.
    src = input("请输入要copy的文件夹名:")

    # Destination folder.
    dst = src + "-[复件]"
    os.mkdir(dst)

    # File names to copy.
    entries = os.listdir(src)

    # Five copy workers; completion tokens arrive on the queue.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copy_file_task,
                            args=(entry, src, dst, done_q))

    # Progress line, refreshed once per finished file.
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print("\rcopy的进度为:%.2f%%" % (finished / total * 100), end="")
    print("\n已完成copy.....")
    workers.close()
    workers.join()
def main():
    """Copy all files of a named folder into a "-附件" duplicate."""
    # 0. Source folder.
    src = input("请输入文件夹的名字")

    # 1. Destination folder.
    dst = src + "-附件"
    os.mkdir(dst)

    # 2. All file names inside the source folder.
    entries = os.listdir(src)

    # 3. Five worker processes; the queue reports finished copies.
    workers = Pool(5)
    done_q = Manager().Queue()
    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, src, dst, done_q))

    # Progress display until every file has reported back.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print("\rcopy的进度是%.2f%%" % (finished / total * 100), end="")
        if finished == total:
            print('已完成COPY')
            break
Example #16
0
def main():
    """Copy all files of a user-named folder into a "-复件" duplicate.

    Fixes two crashes in the progress loop:
    * ``while num <= allNum`` tried to consume one token too many and
      blocked forever on ``queue.get()`` — now ``<``.
    * the format string used a full-width dot (``%。2f``), which raises
      ValueError, and ``% copyRate * 100`` formatted first and then
      repeated the *string* 100 times — now ``"%.2f%%" % (copyRate * 100)``.
    """
    # 0. Source folder name.
    oldFolderName = input("请输入文件夹的名字:")

    # 1. Destination folder.
    newFolderName = oldFolderName + "-复件"
    os.mkdir(newFolderName)

    # 2. Files to copy (echoed for inspection).
    fileNames = os.listdir(oldFolderName)
    print(fileNames)

    # 3. Pool of 5 copy workers; the queue reports each finished file.
    pool = Pool(5)
    queue = Manager().Queue()

    for name in fileNames:
        pool.apply_async(copyFileTask,
                         args=(name, oldFolderName, newFolderName, queue))

    # Progress: one token per copied file.
    num = 0
    allNum = len(fileNames)
    while num < allNum:  # fixed off-by-one (was <=)
        queue.get()
        num += 1
        copyRate = num / allNum
        print("\rcopy的进度是:%.2f%%" % (copyRate * 100), end="")

    pool.close()
    pool.join()
def main():
    """Copy the files of a user-named folder into a '-复件' duplicate."""
    # 0. Source folder.
    src = input('请输入文件夹的名字:')
    # 1. Destination folder.
    dst = src + '-复件'
    os.mkdir(dst)

    # 2. All file names to copy.
    entries = os.listdir(src)

    # 3. Five copy workers; completion tokens arrive on the queue.
    workers = Pool(5)
    done_q = Manager().Queue()
    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, src, dst, done_q))

    # Progress line, one update per finished file.
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print('\rcopy的进度是:%.2f%%' % (finished / total * 100), end='')

    print('\n 已完成copy')
Example #18
0
def main():
    """Copy the files of a user-named folder into a "-复件" duplicate with 3 workers."""
    # Folder to copy, and the name of its duplicate.
    src = input("文件夹:")
    dst = src + "-复件"

    # Collect the file names first ...
    entries = os.listdir(src)

    # ... then create the destination folder.
    os.mkdir(dst)

    # Three workers share the copying; the queue reports each finished file.
    workers = Pool(3)
    done_q = Manager().Queue()
    for entry in entries:
        workers.apply_async(copytask, (entry, src, dst, done_q))

    # One progress line per finished file.
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print("\r%.2f%%" % (finished / total * 100))
    print("copy end")
def main():
    """Copy a folder into a '-复件' duplicate with 5 worker processes.

    Fix: on failure (source folder missing, duplicate already exists) the
    original printed the exception but then fell through and crashed with
    NameError because ``fileNames`` was never bound; the function now
    reports the error and aborts cleanly instead.
    """
    oldFolderName = input("请输入要复制的文件夹名:")
    newFolderName = oldFolderName + "-复件"

    try:
        os.mkdir(newFolderName)
        fileNames = os.listdir(oldFolderName)
    except Exception as result:
        # Nothing to copy — report the problem and bail out.
        print("已捕获到异常信息如下:")
        print(result)
        return

    # Five copy workers; one completion token per file arrives on the queue.
    pool = Pool(5)
    queue = Manager().Queue()
    for name in fileNames:
        pool.apply_async(copyFileTask,
                         (name, oldFolderName, newFolderName, queue))

    # Progress display, throttled to ~10 updates per second.
    num = 0
    allNum = len(fileNames)
    while num < allNum:
        queue.get()
        num += 1
        copyRate = num / allNum
        print("\r复制的进度为:%.2f%%" % (copyRate * 100), end="")
        time.sleep(0.1)
    print("\n已完成复制...")
    pool.close()
    pool.join()
Example #20
0
def main():
    """Copy a fixed project folder into a "-复件" duplicate using 5 processes."""
    # Hard-coded source folder.
    src = "/home/lozzow/PycharmProjects/Python/Day_05"
    dst = src + "-复件"
    os.mkdir(dst)
    # Files that must be copied; the pool spreads the work over 5 processes.
    entries = os.listdir(src)
    workers = Pool(5)
    done_q = Manager().Queue()
    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, src, dst, done_q))

    # Wait for every scheduled copy to finish before reading the queue.
    workers.close()
    workers.join()

    # All tokens are already queued; drain them to display the progress.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print("\r copy的进度是%.2f%%" % (finished / total * 100), end="")
        if finished == total:
            break
    print("完成拷贝")
Example #21
0
def main():
    '''
    Copy a folder's files in parallel with a pool of 6 processes.

    Fix: the progress line printed "\\n" with end="" which pushed every
    update onto a new line; the intended in-place refresh needs "\\r".
    :return:
    '''
    # 1. Folder to copy.
    srcFolder = input("请输入要拷贝的文件夹:")
    # 2. Destination folder: "复件" prefix + source name.
    newFolder = "复件" + srcFolder
    os.mkdir(newFolder)
    # 3. Files inside the source folder.
    files = os.listdir(srcFolder)
    # 4. Pool of 6 workers; the queue reports finished copies.
    pool = Pool(6)
    q = Manager().Queue()
    # Schedule one copy task per file.
    for file in files:
        pool.apply_async(copy, args=(file, srcFolder, newFolder, q))

    # Progress in the parent: one blocking get per finished file.
    num = 0
    allNum = len(files)
    while num < allNum:
        q.get(True)
        num += 1
        copyRate = num / allNum
        print("\rCopy的进度是:%.2f%%" % (copyRate * 100), end="")
    print("")
    # Reap the worker processes.
    pool.close()
    pool.join()
Example #22
0
    def multi_down(self):
        """Download all pictures through a pool of 20 processes, showing a live completion rate."""
        done_q = Manager().Queue()
        workers = Pool(20)  # worker-process pool
        for idx in range(self.max_pics):
            workers.apply_async(self.down, args=(idx, done_q))
        print("----start----")
        print('下载...', end='')
        workers.close()
        finished = 0
        while True:
            # Block until one more download reports completion, then
            # recompute and redraw the completion percentage.
            done_q.get()
            finished += 1
            rate = finished / self.max_pics * 100
            print('\r【%s】已经下载完成%0.2f%%(完成%d张,还剩%d张)...' %
                  (self.group_name, rate, finished,
                   self.max_pics - finished),
                  end=' ')
            if rate >= 100:
                print('\r【%s】已经下载完成(%d张).' % (self.group_name, self.max_pics))
                break

        print("----end----")
Example #23
0
def main():
    """Copy a user-chosen folder into a "-复件" duplicate using 5 processes."""
    # 0. Source folder name.
    src = input("请输入您要拷贝的文件夹名:")

    # 1. Destination folder.
    dst = src + "-复件"
    os.mkdir(dst)
    print("创建文件夹成功!")
    # 2. File names inside the source folder.
    entries = os.listdir(src)
    # 3. Five copy workers report completions through the queue.
    workers = Pool(5)
    done_q = Manager().Queue()
    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, src, dst, done_q))

    # Progress line, refreshed per finished file.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print("\rcopy的进度是:%.2f%%" % (finished / total * 100), end="")
        if finished == total:
            break

    print("\n已完成copy!")
    '''
Example #24
0
def main():
    """Copy all files of a folder into a "复件" duplicate and show progress."""
    # Source and destination folder names.
    src = input("请输入旧的文件夹名字:")
    dst = src + "复件"
    os.mkdir(dst)

    # Files to copy (echoed for debugging).
    entries = os.listdir(src)
    print(entries)
    workers = Pool(5)
    done_q = Manager().Queue()
    for entry in entries:
        print("文件名是%s" % entry)
        workers.apply_async(copyFile,
                            args=(entry, src, dst, done_q))
    # No further tasks will be submitted.
    workers.close()

    # One progress line per finished file.
    total = len(entries)
    finished = 0
    while True:
        done_q.get()
        finished += 1
        print("\r%.2f%%" % (finished / total * 100))
        time.sleep(0.01)
        if finished == total:
            break
    print("copy完成...")
Example #25
0
def main():
    """Copy a folder in the current directory into a "副本" duplicate."""
    # Folder to copy, relative to the current directory.
    src = input("请输入需要拷贝的文件夹的名字:")

    # Create the destination folder.
    dst = src + "副本"
    os.mkdir(dst)

    # File names inside the source folder.
    entries = os.listdir(src)

    # Five copy workers; the queue reports each finished file.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copyFile, args=(entry, src, dst, done_q))

    # Progress line ("%%" prints a literal percent sign).
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print("\r进度:%.2f%%" % (finished / total * 100), end="")

    print("已完成")
Example #26
0
def main():
    """Copy every file of a folder into a '-复件' duplicate.

    Fixes: ``old_folder_name`` was used without ever being assigned
    (NameError at the first line); a user prompt restores it. The progress
    loop used ``<=`` and therefore blocked forever on one extra
    ``queue.get()``.
    """
    # 0. Ask which folder to copy (this step was missing entirely).
    old_folder_name = input("请输入文件夹的名字:")

    # Create the destination folder.
    new_folder_name = old_folder_name + "-复件"
    os.mkdir(new_folder_name)
    # All file names inside the source folder.
    file_names = os.listdir(old_folder_name)

    # Copy with 5 worker processes; the queue reports each finished file.
    pool = Pool(5)
    queue = Manager().Queue()

    for name in file_names:
        pool.apply_async(copy_file_task,
                         args=(name, old_folder_name, new_folder_name,
                               queue))  # args must be a tuple

    # Progress display: one token per copied file.
    num = 0
    all_num = len(file_names)
    while num < all_num:  # fixed off-by-one (was <=)
        queue.get()
        num += 1
        copy_rate = num / all_num
        print("\rcopy的进度是:%.2f" % (copy_rate * 100), end="")

    print("\n已完成copy--------")
Example #27
0
def main():
    """Copy every file of a chosen folder into a '-复件' duplicate."""
    # 0. Source folder.
    src = input("请输入文件夹名称 :")

    # 1. Destination folder.
    dst = src + '-复件'
    os.mkdir(dst)

    # 2. Files to copy (echoed for inspection).
    entries = os.listdir(src)
    print(entries)

    # 3. Five copy workers; note this task takes (src, dst, name, queue).
    workers = Pool(5)
    done_q = Manager().Queue()
    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(src, dst, entry, done_q))

    # Progress output (appended on one line; no carriage return used).
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print("拷贝进度 %.2f %%" % (finished / total * 100), end='')

    print("\n已经完成")

    workers.close()
    workers.join()
def main():
    """Copy the files of a user-named folder into a '-复件' duplicate."""
    # 0. Source folder and the duplicate's name.
    src = input('请输入要拷贝的文件夹名字:')
    dst = src + '-复件'

    # 1. Create the duplicate folder.
    os.mkdir(dst)

    # 2. File names inside the source folder.
    entries = os.listdir(src)

    # 3. Five copy workers; each reports completion on the queue.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copy_file_task,
                            args=(entry, src, dst, done_q))

    # Progress line until every file has reported back.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print('\rcopy的进度是:%.2f%%' % (finished / total * 100), end='')
        if finished == total:
            break

    workers.close()
    workers.join()
    print('')
Example #29
0
def main():
    """Copy all files of a folder into a "-复件" duplicate with 5 workers."""
    # 0. Source folder.
    src = input("请输入文件夹的名字:")

    # 1. Create the destination folder.
    dst = src + "-复件"
    os.mkdir(dst)

    # 2. File names inside the source folder.
    entries = os.listdir(src)
    # 3. Worker pool plus a queue for completion reports.
    workers = Pool(5)
    done_q = Manager().Queue()

    for entry in entries:
        workers.apply_async(copyFileTask,
                            args=(entry, src, dst, done_q))

    # Progress line, one refresh per copied file.
    finished = 0
    total = len(entries)
    while True:
        done_q.get()
        finished += 1
        print("\rcopy的进度是:%.2f%%" % (finished / total * 100), end="")
        if finished == total:
            break
    print("\n已完成c o p y!!!")
Example #30
0
def main():
    """Copy the files of a user-named folder into a "-复件" duplicate."""
    # 0. Source folder.
    src = input("请输入文件夹的名字:")

    # 1. Destination folder.
    dst = src + "-复件"
    os.mkdir(dst)

    # 2. All file names inside the source folder.
    entries = os.listdir(src)

    # 3. Five copy workers; completions arrive on the queue.
    workers = Pool(5)
    done_q = Manager().Queue()
    for entry in entries:
        workers.apply_async(copyFileTask, args=(entry, src, dst, done_q))

    # Progress line, refreshed once per copied file.
    total = len(entries)
    for finished in range(1, total + 1):
        done_q.get()
        print("\rcopy的进度是:%.2f%%" % (finished / total * 100), end="")

    print("\n已完成 copy....")
    def run_finetuning(self, epochs):
        """
        Run the train and test error evaluation and the backpropagation using conjugate gradient to optimize the
        weights in order to make the DBN perform better.

        @param epochs: The number of epochs to run the finetuning for.
        """
        self.train_error = {}
        self.test_error = {}
        # Persist the initial state before any finetuning epoch runs.
        dbn.save_dbn(self.weight_matrices_added_biases, self.train_error, self.test_error, self.fout())
        for epoch in range(epochs):
            self.fout('Backprop: Epoch ' + str(epoch + 1))
            # Separate manager queues: one for the error-eval results, one
            # for the updated weights produced by the backprop process.
            result_queue = Manager().Queue()
            w_queue = Manager().Queue()

            # Start backprop process
            proc = Process(target=self.backpropagation, args=(epoch, self.weight_matrices_added_biases, w_queue,))
            proc.start()
            # Start error eval processes
            # Each tuple is one argument pack for error(); the third element
            # flags train (True) vs. test (False) evaluation.
            evaluations = []
            evaluations.append((
                self.weight_matrices_added_biases, epoch, True, data_processing.get_batch_list(training=True),
                result_queue,
                self.binary_output))
            evaluations.append((
                self.weight_matrices_added_biases, epoch, False, data_processing.get_batch_list(training=False),
                result_queue, self.binary_output))
            p = Pool(cpu_count())
            p.map_async(error, evaluations)
            p.close()

            # Join multiple processes
            p.join()
            proc.join()
            # Adopt the weights computed by the backprop process.
            self.weight_matrices_added_biases = w_queue.get()

            # Print and save error estimates
            # Exactly two results are expected: one train, one test; out[0]
            # is the train flag, out[1] the log message, out[2] the error.
            for e in range(2):
                out = result_queue.get()
                if out[0]:
                    self.train_error[epoch] = out[2]
                    self.fout(out[1])
                else:
                    self.test_error[epoch] = out[2]
                    self.fout(out[1])

            # Save DBN
            dbn.save_dbn(self.weight_matrices_added_biases, self.train_error, self.test_error, self.fout())
Example #32
0
 def upload_test(self):
     """Run the upload load test and print a summary table.

     Spawns one process per simulated user, waits for all of them, then
     tallies per-upload status codes from the shared queue (0 == success).
     NOTE(review): Python 2 code (`print table` statement).
     """
     start_time = time.time()
     q = Manager().Queue()
     plist = []
     # One worker process per simulated user.
     for i in range(self.upload_user):
         proc = Process(target=self.upload_one_user, args=(q,))
         plist.append(proc)
     for proc in plist:
         proc.start()
     for proc in plist:
         proc.join()
     # Drain the result queue: each item is one upload's status code.
     while True:
         if q.empty():
             break
         else:
             if q.get() == 0:
                 self.upload_success += 1
             else:
                 self.upload_fail += 1
     use_time = time.time() - start_time
     # Summary report.
     table = PrettyTable(["key", "value"])
     table.add_row(["One File Size (M)", self.upload_file_size])
     table.add_row(["All File Size (M)", self.upload_file_size * self.upload_number * self.upload_user])
     table.add_row(["Process Count(user)", self.upload_user])
     table.add_row(["Upload Count", self.upload_number * self.upload_user])
     table.add_row(["Interval Time(s)", self.upload_time])
     table.add_row(["Success count", self.upload_success])
     table.add_row(["Fail count", self.upload_fail])
     table.add_row(["Success ratio (%)",
                    (round(self.upload_success / float(self.upload_number * self.upload_user), 4) * 100)])
     table.add_row(["Use time (s)", "%.2f" % use_time])
     print table
Example #33
0
class InMemoryStorage:
    """Key/value store backed by a multiprocessing Manager dict, so values can be shared across processes."""

    def __init__(self):
        # Manager-backed dict: mutations are proxied through the manager process.
        self.storage = Manager().dict()

    def keys(self):
        """Return the stored keys."""
        return self.storage.keys()

    def set_val(self, key, val):
        """Bind ``key`` to ``val``, replacing any previous value."""
        self.storage[key] = val

    def get_val(self, key):
        """Return the value stored under ``key`` (KeyError when absent)."""
        return self.storage[key]

    def append_val(self, key, val):
        """Append ``val`` to the list stored at ``key``, creating it if needed.

        NOTE: in-place mutation of a proxied list is not propagated by
        Manager(), so the list is re-assigned rather than ``.append``-ed.
        """
        if key in self.storage:
            self.storage[key] = self.storage[key] + [val]
        else:
            self.storage[key] = [val]

    def get_list(self, key):
        """Return the list at ``key``, or an empty list when missing."""
        return self.storage.get(key, [])
Example #34
0
  def _feed_helper(self, feed_name, feed_config):
    """Run a single feed provider in a child process, with a timeout.

    Returns the provider's 'sensor_data' entry, or None when the provider
    is unknown, times out, or exits with a non-zero status.
    """
    feed_provider_name = feed_config['name']
    feed_func = FEEDS_MAPPING.get(feed_provider_name)

    LOG.debug("Geting feeds from '%s'", feed_name)

    if not feed_func:
      LOG.warning('Unknown feed provider %s', feed_provider_name)
      return

    # Manager dict lets the child process hand its result back to us.
    feed_result = Manager().dict()
    # Per-feed timeout setting overrides the provider's own default.
    feed_func_timeout = feed_config.get('timeout') or feed_func.get('timeout')

    p = Process(target=feed_func.get('func'), args=(feed_config, feed_result))
    p.start()
    p.join(feed_func_timeout)

    # Still running after the timeout: kill it and mark with status 255.
    if p.is_alive():
      p.terminate()
      status = 255
    else:
      status = p.exitcode

    if status:
      LOG.error("Fail to execute feed provider '%s' for '%s', exitcode '%d'", feed_provider_name, feed_name, status)
      return None
    else:
      return feed_result.get('sensor_data', None)
Example #35
0
def behavior_stat_to_db(param=Const.TRAIN, other=Const.OTHER, process_num=12):
    """Aggregate behavior statistics per (user, item) pair in parallel and store them in the DB.

    NOTE(review): Python 2 code (print statements; the integer division
    below relies on py2 `/` semantics for ints).
    """
    global left_time
    global X, user_item_pair
    print 'Counting behavior......... '
    # Load the behavior column of the X table, indexed by (user_id, item_id).
    x_data = Data(other.PROCESSED_DATA_PATH)
    X = x_data.query(select_table_sql(param.X_TABLE), index=['user_id', 'item_id'])
    X = X['behavior_type']

    # Distinct (user, item) pairs to process.
    set_index = list(set(X.index))
    user_item_pair = pd.DataFrame(set_index, columns=['user_id', 'item_id'])
    print user_item_pair
    user_item_len = len(user_item_pair)

    test_num = user_item_len
    core_num = process_num

    # queue
    # One queue feeds per-chunk results to read_queue; the other returns
    # the merged result to the parent.
    queue = Manager().Queue()
    q_result = Manager().Queue()

    # split data to different processes
    # Each worker gets the index interval [task_list[i], task_list[i+1]).
    interval = test_num/core_num
    left_time = interval
    task_list = [i*interval for i in range(core_num)]
    task_list.append(test_num)

    ####################################
    start_CPU = time.clock()
    start_time = time.time()

    # core_num counting workers plus one reader process.
    p = Pool(core_num+1)

    for i in range(core_num):
        p.apply_async(m_ui_pc, args=(task_list[i], task_list[i+1], queue))

    p.apply_async(read_queue, args=(queue, q_result, core_num))
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')

    # Merged frame from the reader process, sorted by index.
    value = q_result.get(True)
    final = value.sort_index()
    print final

    end_CPU = time.clock()
    end_time = time.time()
    print '%f CPU second' % (end_CPU - start_CPU)
    print '%f real second' % (end_time - start_time)
    print

    # Persist the statistics table and echo it back for verification.
    temp = Data(other.PROCESSED_DATA_PATH)
    final.iloc[0:test_num].to_sql(param.X_STATI_BEHAVIOR_TABLE, temp.conn, if_exists='replace')
    show = pd.read_sql_query(select_table_sql(param.X_STATI_BEHAVIOR_TABLE), temp.conn, index_col='index')
    print show

    x_data.close()
    temp.close()
Example #36
0
def do_git_srpm_import(opts, src_filepath, task, tmp_dir):
    """Import a source RPM into git inside a forked process and return the commit hash.

    Forking works around GitPython misbehaviour in the parent process:
    - https://bugzilla.redhat.com/show_bug.cgi?id=1253335
    - https://github.com/gitpython-developers/GitPython/issues/304
    """
    # Shared dict lets the child hand the resulting commit hash back.
    result_dict = Manager().dict()
    worker = Process(target=actual_do_git_srpm_import,
                     args=(opts, src_filepath, task, tmp_dir, result_dict))
    worker.start()
    worker.join()

    # No hash means the child failed to import the package.
    if result_dict.get("hash") is None:
        raise PackageImportException("Failed to import the source rpm: {}".format(src_filepath))

    return str(result_dict["hash"])
Example #37
0
    def run(self, show_errors):
        """Run every test case of this suite through joosc in parallel and report failures.

        @param show_errors: accepted for interface compatibility; not read
            in this body — TODO confirm it is used by callers/overrides.
        """
        tests_path = "%s/%s" % (self.test_folder, self.test_subfolder)
        test_total = 0
        test_fails = 0

        print("Running test suite: '%s'" % self._name)
        print("==================================================")

        # Results come back through a manager queue, one tuple per test.
        q = Manager().Queue()
        p_list = []
        
        pool = Pool(processes=num_cores())

        start_time = time.time()
        # Loop through test cases (files)
        for test_name in os.listdir(tests_path):
            # Only entries whose name starts with "J" are test cases.
            if not test_name.startswith("J"):
                continue
            test_path = os.path.join(tests_path, test_name)
            test_total += 1

            # Run joosc (i.e., run the test).
            p = pool.apply_async(func=run_joosc, args=(self._joosc_options, test_path, q, ))
            p_list.append(p)

        # Collect one result per scheduled test.
        for p in p_list:
            ret = q.get(5)
            
            # ret appears to be (status, path, output) judging by the uses
            # below; a negative status means the runner itself crashed.
            if ret[0] < 0:
                print("#\nUNEXPECTED ERROR: %s" % os.path.split(ret[1])[1])

            elif self.is_correct_result(ret[0], os.path.split(ret[1])[1]) == False:
                test_fails += 1
                print("#\nTEST FAIL %d: %s" % (test_fails, os.path.split(ret[1])[1]))
                if self.verbose:
                    print("OUTPUT:")
                    print("==================================================")
                    print(ret[2])
                    print("==================================================")

            else:
                # Passing test: compact dot-style progress.
                sys.stdout.write('.')
                sys.stdout.flush()

        # Done tests
        print("\n==================================================")
        print("Test run successful.  %s seconds" % (time.time() - start_time))
        print("{} test(s) ran. {} test(s) failed.".format(test_total, test_fails))
    def remote_call(self, rpc_code, *args, **kwargs):
        """
        Make synchronous remote procedure calls on the Orchestrator.

        :param rpc_code: RPC code.
        :type rpc_code: int

        :returns: Depends on the call.
        :rtype: \\*
        """

        # Create the response queue.
        try:
            response_queue = Manager().Queue()

        # If the above fails we can assume the parent process is dead.
        except:
            exit(1)

        # Send the RPC message.
        self.send_msg(message_type = MessageType.MSG_TYPE_RPC,
                      message_code = rpc_code,
                      message_info = (response_queue, args, kwargs),
                          priority = MessagePriority.MSG_PRIORITY_HIGH)

        # Get the response.
        try:
            raw_response = response_queue.get()  # blocking call

        # If the above fails we can assume the parent process is dead.
        except:
            exit(1)

        # Return the response, or raise an exception on error.
        # On failure the payload is (exc_type, exc_value, traceback_list);
        # it is echoed to stderr before being re-raised.
        success, response = raw_response
        if not success:
            exc_type, exc_value, tb_list = response
            try:
                sys.stderr.writelines(
                    format_exception_only(exc_type, exc_value) )
                sys.stderr.writelines(
                    format_list(tb_list) )
            except Exception:
                pass
            # NOTE(review): Python 2 re-raise syntax (raise type, value).
            raise response[0], response[1]
        return response
Example #39
0
 def upload_begin(self):
     """Upload every unique md5+crc32 entry from the list file via worker processes, then print a summary.

     NOTE(review): Python 2 code (`print` statements).
     """
     plist = []
     q = Manager().Queue()
     # Collect unique 41-character "md5 crc32" keys from the list file.
     with open(self.list_path, 'r') as fp:
         for i in fp:
             if not i:
                 break
             md5_crc32 = i.strip()[:41]
             if md5_crc32 not in self.tmp_list and len(md5_crc32) == 41:
                 self.tmp_list.append(md5_crc32)
                 self.upload_num += 1
     print self.upload_num
     # One worker process per chunk of keys.
     for md5_crc32_list in self.chunks(self.tmp_list, self.work_count):
         proc = Process(target=self.upload_file, args=(q, md5_crc32_list,))
         plist.append(proc)
     for proc in plist:
         proc.start()
     for proc in plist:
         proc.join()
     # Tally results: 0 = success, 1 = fail, 2 = download fail.
     while True:
         if q.empty():
             break
         else:
             r = q.get()
             if r == 0:
                 self.success += 1
             elif r == 1:
                 self.fail += 1
             elif r == 2:
                 self.download_fail += 1
             else:
                 pass
     use_time = time.time() - self.start_time
     # Summary report.
     table = PrettyTable(["key", "value"])
     table.add_row(["Upload Count", len(set(self.tmp_list))])
     table.add_row(["Success count", self.success])
     table.add_row(["Fail count", self.fail])
     table.add_row(["Download Fail", self.download_fail])
     table.add_row(["Use time (s)", "%.2f" % use_time])
     print table
Example #40
0
def startServer(host, port, options):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    s.bind((host, port))
    s.listen(0)

    queue = Manager().Queue()
    while True:
        print "main: waiting for connection"
        conn, addr = s.accept()
        print 'main: Connected by', addr

        data = conn.recv(1024)
        print 'received port request'
        p = Process(target = serverNewClient, args = (queue, options, ))
        p.start()
        while queue.empty():
            time.sleep(0.05)
            print "queue is still empty"
        port = queue.get()
        conn.sendall(str(port) + '\r\n')
        print "assigned port %d to new client" % port
Example #41
0
class Reader(object):
    """ Speak text through a background player process.

    A Manager dict (self._msg_dict) carries shared state between the
    parent and the child process (playing/paused flags, stream length,
    the text to speak); a Pipe carries position queries and seeks.

    """

    def __init__(self):
        """ Reader() -> Prepare a reader; nothing plays until read().

        """

        self._text = ''

        # Setup the msg_dict for sending messages to the child process.
        self._msg_dict = Manager().dict()

        # Create a pipe for sending and receiving messages.
        self._control_conn, self._player_conn = Pipe()

    def __str__(self) -> str:
        """ The text this reader was last given.

        """

        # Fixed: the previous version had unreachable statements after
        # this return (a busy-wait on msg_dict['info'] and a second
        # return); they were dead code and have been removed.
        return self._text

    def __repr__(self) -> str:
        """ __repr__ -> Returns a python expression to recreate this instance.

        """

        repr_str = ''  # "filename='%(_filename)s'" % self.__dict__

        return '%s(%s)' % (self.__class__.__name__, repr_str)

    def __enter__(self):
        """ Provides the ability to use pythons with statement.

        """

        try:
            return self
        except Exception as err:
            print(err)
            return None

    def __exit__(self, exc_type, exc_value, traceback):
        """ Stop playback and close both pipe ends when leaving a
        with-block.

        """

        try:
            self.stop()
            self._control_conn.close()
            self._player_conn.close()
            return not bool(exc_type)
        except Exception as err:
            print(err)
            return False

    def __del__(self):
        """ Deliberately a no-op; cleanup happens in __exit__/stop().

        """

        pass

    def __len__(self):
        """ The length of the stream, if known (0 before playback starts).

        """

        return self.length

    def playing_wrapper(func):
        """ Wrap methods and only call them if the stream is playing.

        """

        @functools_wraps(func)
        def wrapper(self, *args, **kwargs):
            """ Check if stream is playing and if it is then call func
            otherwise print a message and exit.

            """

            if not self.playing:
                print("%(filename)s is not playing." % self._msg_dict)
                return None

            return func(self, *args, **kwargs)

        return wrapper

    def _play_proc(self, msg_dict: dict, pipe: Pipe):
        """ Player process body: synthesize msg_dict['text'] and stream
        it to the audio device until stopped, honoring pause and the
        get/setposition commands arriving over the pipe.

        """

        # Open the file to play.
        with EspeakText(**msg_dict) as fileobj:

            # Put the file info in msg_dict.
            # msg_dict['info'] = str(fileobj)
            msg_dict['length'] = fileobj.length

            # Open an audio output device that can handle the data from
            # fileobj.
            # with AudioDevice(rate=22050, channels=1) as device:

            device = AudioDevice(rate=22050, channels=1)
            try:

                # Set the default number of loops to infinite.
                fileobj.loops = msg_dict.get('loops', -1)

                # Initialize variables.
                buf = b'\x00' * device.buffer_size
                written = 0

                # Loop until stopped or nothing read or written.
                while msg_dict['playing'] and (buf or written):
                    # Keep playing if not paused.
                    if not msg_dict.get('paused', False):
                        # Re-open the device if it was closed.
                        if device.closed:
                            device = AudioDevice(rate=22050, channels=1)

                        # Read the next buffer full of data.
                        buf = fileobj.readline()

                        # Write buf.
                        written = device.write(buf)
                    else:
                        # Close the device when paused and sleep to
                        # open the audio for another process and
                        # save cpu cycles.
                        if not device.closed:
                            device.close()

                        time_sleep(0.05)

                        # Write a buffer of null bytes so the audio
                        # system can keep its buffer full.
                        # device.write(b'\x00' * device.buffer_size)

                    # Get and process any commands from the parent process.
                    if pipe.poll():
                        # Get the data into temp.
                        command = pipe.recv()

                        if 'getposition' in command:
                            pipe.send(fileobj.position)
                        elif 'setposition' in command:
                            fileobj.position = command['setposition']
            except Exception as err:
                print(err)
            finally:
                if not device.closed:
                    device.close()

        # Set playing to False for the parent.
        msg_dict['playing'] = False

    def read(self, text: str, **kwargs):
        """ Read the text.

        Any extra keyword arguments are forwarded to the child via the
        shared msg_dict (and ultimately to EspeakText).
        """

        self._text = text
        self._msg_dict['text'] = text
        self._msg_dict.update(kwargs)

        # After opening a new file stop the current one from playing.
        self.stop()

        # Pause it.
        self.pause()

        # Start it playing so seeking works.
        self.play()

    def play(self):
        """ play() -> Start (or un-pause) playback.

        """

        if not self._msg_dict.get('playing', False):
            # Set playing to True for the child process.
            self._msg_dict['playing'] = True

            # Open a new process to play a file in the background.
            self._play_p = Process(target=self._play_proc,
                                   args=(self._msg_dict, self._player_conn))

            # Start the process.
            self._play_p.start()
        elif self._msg_dict.get('paused', True):
            # Un-pause if paused.
            self._msg_dict['paused'] = False

    def stop(self):
        """ stop() -> Stop playback and wait for the child to exit.

        """

        if self._msg_dict.get('playing', False):
            # Stop playback.
            self._msg_dict['playing'] = False

            # Wait for the player process to stop.
            self._play_p.join()

            # Un-Pause.
            self._msg_dict['paused'] = False

    def pause(self):
        """ pause() -> Pause playback.

        """

        # Pause playback.
        self._msg_dict['paused'] = True

    @property
    def paused(self) -> bool:
        """ True if playback is paused.

        """

        return self._msg_dict.get('paused', False)

    @property
    def playing(self) -> bool:
        """ True if playing.

        """

        return self._msg_dict.get('playing', False)

    @property
    def length(self) -> int:
        """ Length of audio.

        """

        return self._msg_dict.get('length', 0)

    @property
    @playing_wrapper
    def position(self) -> int:
        """ Current position.

        """

        self._control_conn.send('getposition')
        return self._control_conn.recv()

    @position.setter
    @playing_wrapper
    def position(self, value: int):
        """ Set the current position.

        """

        self._control_conn.send({'setposition': int(value)})

    @playing_wrapper
    def tell(self) -> int:
        """ tell -> Returns the current position.

        """

        return self.position
Example #42
0
def main():
    """Index key frames of every video in a list: decode key frames to
    ./tmp, classify them with worker processes, and log throughput
    statistics per video.

    NOTE(review): Python 2 code (xrange, relies on module globals such
    as VIDEO_RESOURCE, calculate_class, load_from_classifier, cv).
    """
    import argparse
    import logging
    import os
    import yaml

    parser = argparse.ArgumentParser()
    parser.add_argument('classifier')
    parser.add_argument('--postprocess', action="store_true",
                        help='Run postprocessing, close blobs and remove noise')
    parser.add_argument('videolist', help='A file listed all the videos to be indexed')
    parser.add_argument('cores', type=int, help='Number of processes of paralellism')
    args = parser.parse_args()

    logging.basicConfig(level=logging.WARNING,
                        format="%(asctime)s - %(message)s")

    # Load the trained models once; they are shared with the worker
    # processes through module-level globals.
    classifier = zipfile.ZipFile(args.classifier)
    global forest0, svmmodels, training_bosts, hist0
    forest0, hist0, forest1, hist1, training_bosts, svmmodels, prior = \
        load_from_classifier(classifier)
    classifier.close()

    KEY_FRAME_PERIOD = 2 # in seconds
    #queue = Queue.Queue()
    #data_queue = Queue.Queue()
    # Manager queues so work items and results can cross process boundaries.
    queue = Manager().Queue()
    data_queue = Manager().Queue()

    for processes in [4]:
        video_list = open(args.videolist, 'r')
        log_file = open('statistics%d.txt' % processes, 'w')

        fps = 0
        fps_count = 0

        for video_file in video_list:
            video_file = video_file.strip()
            name = os.path.splitext(video_file)[0]
            file_path = os.path.join(VIDEO_RESOURCE, video_file)
            log_file.write(file_path+"\n")

            capture = cv.CaptureFromFile(file_path)
            frame_rate = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
            total_frames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
            log_file.write("frame rate: %.3f, total frames: %d\n" % (frame_rate, total_frames))

            # Decode phase: dump one key frame every KEY_FRAME_PERIOD
            # seconds into ./tmp, then enqueue one work item per frame.
            start_time0 = time.time()
            key_frame_counter = 0
            frame = cv.QueryFrame(capture)
            os.makedirs("tmp")
            while frame:
                cv.SaveImage("tmp/" + name + "%d.png" % key_frame_counter, frame)
                for i in xrange(int(KEY_FRAME_PERIOD * frame_rate)):
                    frame = cv.QueryFrame(capture)
                key_frame_counter += 1
            for i in xrange(key_frame_counter):
                 data_queue.put(i)

            start_time = time.time()

            # Classification phase: workers pull frame indices from
            # data_queue and push (frame_index, scores) onto queue.
            ps = []
            for group in xrange(processes):
                p = Process(target = calculate_class, args=(name, queue, data_queue, ))
                #p = threading.Thread(target = calculate_class, args=(name, queue, data_queue, ))
                p.start()
                ps.append(p)
            for p in ps:
                p.join()

            elapse_time = time.time() - start_time

            # NOTE(review): '360.txt' is reopened with 'w' for every
            # video, so only the last video's scores survive -- confirm
            # this is intended.
            accuracy_file = open('360.txt', 'w')
            while not queue.empty():
                q_entry = queue.get()
                frame_counter = q_entry[0]
                ILP = q_entry[1]
                accuracy_file.write('%d' % frame_counter)
                for class_index, score in enumerate(ILP):
                    accuracy_file.write(',%.02f' % score)
                accuracy_file.write('\n')
            accuracy_file.close()

            os.system("rm -rf tmp")

            log_file.write("decoding time: %.2f, total time: %.2f, key frames: %d, frame per sec: %.3f\n" \
                % (start_time - start_time0, elapse_time, key_frame_counter, key_frame_counter / elapse_time))
            fps += key_frame_counter / elapse_time
            fps_count += 1

            #time.sleep(10)

        video_list.close()
        log_file.write("average fps: %.3f\n" % (fps/fps_count))
        log_file.close()
Example #43
0
def main():
    """Continuously poll the cloudlet stream list, classify key frames
    of every new denatured video stream with a process pool, and post
    the resulting tag entries back to the cloudlet.

    NOTE(review): Python 2 code; depends on module globals (get_list,
    post, put, CLASSES, SCORE_THRESHOLD, split_frame_list, ...).
    """
    import argparse
    import logging
    import os
    import yaml
    import cv

    global processes
    global forest0, svmmodels, training_bosts, hist0

    parser = argparse.ArgumentParser()
    parser.add_argument('classifier')
    parser.add_argument('cores', type=int, help='Number of processes of paralellism')
    parser.add_argument('--postprocess', action="store_true",
                        help='Run postprocessing, close blobs and remove noise')
    args = parser.parse_args()

    logging.basicConfig(level=logging.WARNING,
                        format="%(asctime)s - %(message)s")

    # Load trained models once; workers see them through globals.
    classifier = zipfile.ZipFile(args.classifier)
    forest0, hist0, forest1, hist1, training_bosts, svmmodels, prior = \
        load_from_classifier(classifier)
    classifier.close()

    processes = args.cores
    pool = Pool(processes = processes)

    KEY_FRAME_PERIOD = 2 # in seconds
    q = Manager().Queue()
    total_frame = 0

    new_flag = True
    while True:
        # Sleep between polls once no new work was found last round.
        if not new_flag:
            print "wait..."
            time.sleep(1)
        stream_list = get_list(CLOUDLET_RESOURCE, STREAM_RESOURCE)
        new_flag = False
        prev_stream = None
        for stream in stream_list:
            # Skip streams that are not denatured videos or were already
            # tagged (description contains "pstf").
            if stream.get("stream_description").find("denatured") == -1 or stream.get("stream_description").find("video") == -1 or stream.get("stream_description").find("pstf") != -1:
                prev_stream = stream
                continue
            ILP_max = []
            for i in xrange(len(CLASSES)):
                ILP_max.append(0)
            ILP_list = []
            for i in xrange(len(CLASSES)):
                ILP_list.append([])
            path, name = stream.get("path").replace("mnt", "cloudletstore").rsplit('/', 1)
            print os.path.join(path, name)
            path_p, name_p = prev_stream.get("path").replace("mnt", "cloudletstore").rsplit('/', 1)
            print os.path.join(path_p, name_p)
            statinfo = os.stat(os.path.join(path_p, name_p))
            prev_stream = stream

            # Empty previous stream file: upload presumably not finished
            # yet, try again on a later poll.
            if statinfo.st_size == 0:
                continue

            new_flag = True
            frame_rate = 30

            capture = cv.CaptureFromFile(os.path.join(path, name))
            frame_rate = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
            total_frames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
            frame = cv.QueryFrame(capture)
            print frame_rate, total_frames
            print capture

            start_time = time.time()

            key_frame_counter_base = 0
            while frame:
                # Dump up to `processes` key frames to disk, then map a
                # batch of classification jobs over the pool.
                process_num = 0
                while frame:
                    cv.SaveImage("indexing" + "%d.png" % process_num, frame)
                    for i in xrange(int(KEY_FRAME_PERIOD * frame_rate)):
                        frame = cv.QueryFrame(capture)
                    process_num += 1
                    if process_num == processes:
                        break
                pool.map(calculate_class, [(q, x) for x in xrange(key_frame_counter_base, key_frame_counter_base + process_num)])

            # Drain per-frame results and bucket scores per class.
            while not q.empty():
                q_entry = q.get()
                key_frame_counter = q_entry[0]
                ILP = q_entry[1]
                for class_index, score in enumerate(ILP):
                    if score > SCORE_THRESHOLD:
                        ILP_list[class_index].append((key_frame_counter * int(KEY_FRAME_PERIOD * frame_rate) + 1, score))
                        print (CLASSES[class_index], "%.02f" % score),
                    if score > ILP_max[class_index]:
                        ILP_max[class_index] = score
                print

                # NOTE(review): this increment sits inside the queue-drain
                # loop, so it grows once per queue entry; it looks like it
                # was meant to run once per mapped batch -- confirm.
                key_frame_counter_base += process_num

            # Convert per-class frame lists into tag entries and post them.
            for class_index, frame_list in enumerate(ILP_list):
                if not frame_list:
                    continue
                frame_list_split = split_frame_list(frame_list, int(KEY_FRAME_PERIOD * frame_rate) * 2)
                for frame_list, local_max_score in frame_list_split:
                    tag_entry = {}
                    tag_entry["tag"] = CLASSES[class_index] + ":%d" % (ILP_max[class_index] * 100)
                    tag_entry["tag_value"] = local_max_score
                    tag_entry["offset"] = frame_list[0] / frame_rate
                    tag_entry["duration"] = (frame_list[-1] - frame_list[0]) / frame_rate
                    tag_entry["segment"] = stream.get("segment")
                    print tag_entry
                    ret_dict = post(CLOUDLET_RESOURCE, TAG_RESOURCE, tag_entry)

            # Mark the stream as processed so it is skipped on later polls.
            if stream.get("stream_description").find("pstf") == -1:
                stream_entry = {"stream_description": stream.get("stream_description") + "pstf;"}
                ret_dict = put(CLOUDLET_RESOURCE, stream.get("resource_uri"), stream_entry)

            elapse_time = time.time() - start_time
            print "max score:"
            print [(CLASSES[class_index], "%.02f" % score) for class_index, score in enumerate(ILP_max)]
            print "total time: %.2f, key frames: %d, frame per sec: %.2f" \
               % (elapse_time, key_frame_counter_base, key_frame_counter_base / elapse_time)
            print
Example #44
0
    def simulate(self, cell):
        """Search for a stimulus amplitude that yields exactly
        run_param['spikes'] spikes in `cell`, running each trial in a
        child process, then store the traces of the matching trial in
        self.data. Bisection brackets the amplitude between amp_low and
        amp_high once an upper bound has been found.
        """
        run_param = self.run_param
        amp = self.get_previous_amp()
        self.run_param['init_amp'] = amp

        # String to put before output to the terminal.
        str_start = self.name
        str_start += " "*(20 - len(self.name)) + ": "

        # Short-circuit mode: just attach the electrode at the starting
        # amplitude without searching.
        if self.only_apply_electrode:
            if self.verbose:
                print str_start + "Only applying electrode."
            soma_clamp_params = {
                'idx': cell.somaidx,
                'record_current': False,
                'amp': amp,  #  [nA]
                'dur': self.run_param['duration'],  # [ms]
                'delay': self.run_param['delay'],  # [ms]
                'pptype': self.run_param['pptype'],
            }
            stim = LFPy.StimIntElectrode(cell, **soma_clamp_params)
            return

        # Find a current that generates n spikes.
        amp_low = 0
        amp_high = 0
        # Copy the run param so they can be given to the "sub" simulation.
        sub_run_param = run_param.copy()
        while True:
            # Gather data from the sub process in a dictionary.
            # manager = Manager()
            sub_data = Manager().dict()
            # Set the new amp.
            sub_run_param['amp'] = amp
            # Run the "sub" simulation in a child process so each trial
            # starts from a clean NEURON state.
            target = self.simulate_sub
            args = (cell, sub_data, sub_run_param)
            process = Process(target=target, args=args)
            process.start()
            process.join()

            spike_cnt = sub_data.get('spike_cnt', 0)
            if self.verbose:
                print str_start + 'Found {} spikes at current {} nA.'.format(spike_cnt,
                                                                 amp)
            # Change the amplitude according to the spike cnt.
            if spike_cnt == run_param['spikes']:
                break
            elif spike_cnt < run_param['spikes']:
                amp_low = amp
            elif spike_cnt > run_param['spikes']:
                amp_high = amp
            # Increase the amp until we have more than the desired number of spikes.
            if amp_high == 0:
                amp = 1.25 * amp
                continue
            # Otherwise bisect between the bracketing amplitudes.
            amp = 0.5 * (amp_high + amp_low)
            # Give up outside a plausible amplitude range.
            if amp < 1e-4 or amp > 1e4:
                print str_start + 'Curent amplitude is above or under threshold, finishing.'
                return

        # Give the data back.
        self.data['amp'] = amp
        self.data['spike_cnt'] = spike_cnt
        self.data['dt'] = cell.timeres_NEURON
        self.data['stimulus_i'] = sub_data['stimulus_i']
        self.data['soma_v'] = sub_data['soma_v']
        self.data['soma_t'] = sub_data['soma_t']

        if self.apply_electrode_at_finish:
            soma_clamp_params = {
                'idx': cell.somaidx,
                'record_current': True,
                'amp': self.data['amp'],  #  [nA]
                'dur': self.run_param['duration'],  # [ms]
                'delay': self.run_param['delay'],  # [ms]
                'pptype': self.run_param['pptype'],
            }
            stim = LFPy.StimIntElectrode(cell, **soma_clamp_params)
Example #45
0
class TestVariant(object):
    """A named test variant: a set of extra compile flags plus tag
    include/exclude filters.

    Tests run in a multiprocessing pool; worker processes report results
    over a managed queue which the main process drains and prints.
    """

    def __init__(self, name, compile_flags=None):
        """name: variant name; compile_flags: optional extra ch flags."""
        # Default changed from a shared mutable [] to None to avoid the
        # mutable-default-argument pitfall; behavior is unchanged.
        compile_flags = [] if compile_flags is None else list(compile_flags)
        self.name = name
        self.compile_flags = \
            ['-WERExceptionSupport', '-ExtendedErrorStackForTestHost',
             '-BaselineMode'] + compile_flags
        self.tags = tags.copy()
        self.not_tags = not_tags.union(
            ['{}_{}'.format(x, name) for x in ('fails','exclude')])

        self.msg_queue = Manager().Queue() # messages from multi processes
        self.test_result = TestResult()
        self._print_lines = [] # _print lines buffer
        self._last_len = 0

    # check if this test variant should run a given test
    def _should_test(self, test):
        tags = split_tags(test.get('tags'))
        if not tags.isdisjoint(self.not_tags):
            return False
        if self.tags and not self.tags.issubset(tags):
            return False
        if not_compile_flags: # exclude unsupported compile-flags if any
            flags = test.get('compile-flags')
            if flags and \
                    not not_compile_flags.isdisjoint(flags.lower().split()):
                return False
        return True

    # print output from multi-process run, to be sent with result message
    def _print(self, line):
        self._print_lines.append(str(line))

    # queue a test result from multi-process runs
    def _log_result(self, test, fail):
        output = '\n'.join(self._print_lines) # collect buffered _print output
        self._print_lines = []
        self.msg_queue.put((test.filename, fail, test.elapsed_time, output))

    # (on main process) process one queued message
    def _process_msg(self, msg):
        filename, fail, elapsed_time, output = msg
        self.test_result.log(filename, fail=fail)
        line = '[{}/{} {:4.2f}] {} -> {}'.format(
            self.test_result.total_count(),
            self.test_count,
            elapsed_time,
            'Failed' if fail else 'Passed',
            self._short_name(filename))
        # Pad with spaces so a shorter progress line fully overwrites the
        # previous one when we rewrite the line with '\r'.
        padding = self._last_len - len(line)
        print(line + ' ' * padding, end='\n' if fail else '\r')
        log_message(line)
        self._last_len = len(line) if not fail else 0
        if len(output) > 0:
            print_and_log(output)

    # get a shorter test file path for display only
    def _short_name(self, filename):
        folder = os.path.basename(os.path.dirname(filename))
        return os.path.join(folder, os.path.basename(filename))

    # (on main process) wait and process one queued message
    def _process_one_msg(self):
        self._process_msg(self.msg_queue.get())

    # log a failed test with details
    def _show_failed(self, test, flags, exit_code, output,
                    expected_output=None, timedout=False):
        if timedout:
            self._print('ERROR: Test timed out!')
        self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
        # Use `is None` (identity) rather than `== None`.
        if expected_output is None or timedout:
            self._print("\nOutput:")
            self._print("----------------------------")
            self._print(output)
            self._print("----------------------------")
        else:
            # Show only the first line that differs from the baseline.
            lst_output = output.split(b'\n')
            lst_expected = expected_output.split(b'\n')
            ln = min(len(lst_output), len(lst_expected))
            for i in range(ln):
                if lst_output[i] != lst_expected[i]:
                    self._print("Output: (at line " + str(i) + ")")
                    self._print("----------------------------")
                    self._print(lst_output[i])
                    self._print("----------------------------")
                    self._print("Expected Output:")
                    self._print("----------------------------")
                    self._print(lst_expected[i])
                    self._print("----------------------------")
                    break

        self._print("exit code: {}".format(exit_code))
        self._log_result(test, fail=True)

    # temp: try find real file name on hard drive if case mismatch
    def _check_file(self, folder, filename):
        path = os.path.join(folder, filename)
        if os.path.isfile(path):
            return path     # file exists on disk

        filename_lower = filename.lower()
        for candidate in os.listdir(folder):
            if candidate.lower() == filename_lower:
                self._print('\nWARNING: {} should be {}\n'.format(
                    path, candidate))
                return os.path.join(folder, candidate)

        # can't find the file, just return the path and let it error out
        return path

    # run one test under this variant
    def test_one(self, test):
        try:
            test.start()
            self._run_one_test(test)
        except Exception:
            test.done()
            self._print(traceback.format_exc())
            self._log_result(test, fail=True)

    # internally perform one test run
    def _run_one_test(self, test):
        folder = test.folder
        js_file = test.filename = self._check_file(folder, test.files)
        js_output = b''

        working_path = os.path.dirname(js_file)

        flags = test.get('compile-flags')
        flags = self.compile_flags + (flags.split() if flags else [])
        cmd = [binary] + flags + [os.path.basename(js_file)]

        # NOTE(review): test.start() was already called by test_one();
        # calling it again here restarts the timing -- confirm intended.
        test.start()
        proc = SP.Popen(cmd, stdout=SP.PIPE, stderr=SP.STDOUT, cwd=working_path)
        timeout_data = [proc, False]
        # Kill the child and flag the timeout when the timer fires.
        def timeout_func(timeout_data):
            timeout_data[0].kill()
            timeout_data[1] = True
        timeout = test.get('timeout', args.timeout) # test override or default
        timer = Timer(timeout, timeout_func, [timeout_data])
        try:
            timer.start()
            js_output = normalize_new_line(proc.communicate()[0])
            exit_code = proc.wait()
        finally:
            timer.cancel()
        test.done()

        # shared _show_failed args
        fail_args = { 'test': test, 'flags': flags,
                      'exit_code': exit_code, 'output': js_output }

        # check timed out
        if timeout_data[1]:
            return self._show_failed(timedout=True, **fail_args)

        # check ch failed
        if exit_code != 0:
            return self._show_failed(**fail_args)

        # check output
        if 'baseline' not in test:
            # output lines must be 'pass' or 'passed' or empty
            lines = (line.lower() for line in js_output.split(b'\n'))
            if any(line != b'' and line != b'pass' and line != b'passed'
                    for line in lines):
                return self._show_failed(**fail_args)
        else:
            baseline = test.get('baseline')
            if baseline:
                # perform baseline comparison
                baseline = self._check_file(working_path, baseline)
                with open(baseline, 'rb') as bs_file:
                    baseline_output = bs_file.read()

                # Cleanup carriage return
                # todo: remove carriage return at the end of the line
                #       or better fix ch to output same on all platforms
                expected_output = normalize_new_line(baseline_output)

                if expected_output != js_output:
                    return self._show_failed(
                        expected_output=expected_output, **fail_args)

        # passed
        self._log_result(test, fail=False)

    # run tests under this variant, using given multiprocessing Pool
    def run(self, tests, pool):
        print_and_log('\n############# Starting {} variant #############'\
                        .format(self.name))
        if self.tags:
            print_and_log('  tags: {}'.format(self.tags))
        for x in self.not_tags:
            print_and_log('  exclude: {}'.format(x))
        print_and_log()

        # filter tests to run
        tests = [x for x in tests if self._should_test(x)]
        self.test_count = len(tests)

        # run tests in parallel; completion is tracked by draining
        # msg_queue, not via the AsyncResult.
        result = pool.map_async(run_one, [(self,test) for test in tests])
        while self.test_result.total_count() != self.test_count:
            self._process_one_msg()

    # print test result summary
    def print_summary(self):
        print_and_log('\n######## Logs for {} variant ########'\
                        .format(self.name))
        for folder, result in sorted(self.test_result.folders.items()):
            print_and_log('{}: {}'.format(folder, result))
        print_and_log("----------------------------")
        print_and_log('Total: {}'.format(self.test_result))
Example #46
0
    # Game main loop: draw the GUI, dispatch events, and (in the voice
    # modes) manage a background voice-listener process and its command
    # queue. NOTE(review): this fragment sits inside a function whose
    # def line is outside this view.
    FPS_clock = pygame.time.Clock()
    game_state = state.GameState()
    game_gui = gui.GUI(game_state)
    game_event_handler = event_handler.EventLogic(game_state, game_gui)
    game_gui.add_handler(game_event_handler)
    game_gui.draw(game_state.get_state())
    pygame.display.update()
    commandQueue = Manager().Queue()
    listeningProcess = Process(target=voice_listener, args=(game_event_handler, commandQueue,))
    while True:
        game_gui.draw(game_state.get_state())
        game_event_handler.event_handler()
        if game_state.get_state() == "SSH season voice mode" or game_state.get_state() == "Web season voice mode":
            # The handler queue carries start (truthy) / stop (falsy)
            # requests for the voice listener.
            if not game_event_handler.queue.empty():
                val = game_event_handler.queue.get()
                if val:
                    listeningProcess.start()
                else:
                    # A terminated Process cannot be restarted, so build
                    # a fresh one for the next start request.
                    listeningProcess.terminate()
                    listeningProcess.join()
                    listeningProcess = Process(target=voice_listener, args=(game_event_handler, commandQueue,))
            if not commandQueue.empty():
                voice_command = commandQueue.get()
                try:
                    game_event_handler.pipi.say(voice_command %
                                                game_gui.bool_to_text[str(game_gui.light_to_string[voice_command])])
                except KeyError:
                    pass
        pygame.display.update()
        FPS_clock.tick(30)
Example #47
0
    ####################################
    # Fan the user-item scoring work out over a process pool, collect
    # the aggregated result from q_result, and persist it to SQLite.
    # NOTE(review): this fragment sits inside a function whose def line
    # (and the definitions of queue/q_result/task_list/core_num) is
    # outside this view.
    start_CPU = time.clock()
    start_time = time.time()

    # core_num scoring workers plus one reader that aggregates output.
    p = Pool(core_num+1)
    result = []
    for i in range(core_num):
        p.apply_async(m_ui_pc, args=(task_list[i], task_list[i+1], queue))

    p.apply_async(read_queue, args=(queue, q_result, core_num))
    print('Waiting for all subprocesses done...')
    p.close()
    p.join()
    print('All subprocesses done.')

    # Blocking get of the single aggregated result; presumably a pandas
    # DataFrame (sort_index/iloc/to_sql below) -- confirm.
    value = q_result.get(True)
    final = value.sort_index()
    print final

    end_CPU = time.clock()
    end_time = time.time()
    print '%f CPU second' % (end_CPU - start_CPU)
    print '%f real second' % (end_time - start_time)
    print

    temp = Data('./data/train_behavior.db')
    final.iloc[0:test_num].to_sql('user_item_behavior', temp.conn, if_exists='replace')
    show = pd.read_sql_query('select* from user_item_behavior', temp.conn, index_col='index')
    print show

    all_data.close()
Example #48
0
class WorkerPool(object):
    """
    Manages a bunch of worker processes.

    Each worker is a (Process, Connection) tuple addressed by its index
    ("wid") in self.workers. Workers report back to the parent on a
    single managed queue (self.messages); the parent addresses each
    worker individually over its dedicated pipe.
    """
    def __init__(self, workers=4):
        """
        workers     number of worker processes to spawn (must be >= 1)
        """
        if workers < 1:
            raise ValueError(_("at least one worker is required"))

        # A "worker" is simply a tuple consisting of a Process object
        # and our end of a pipe. Each worker is always addressed with
        # its "worker id" (wid): That's the index of the tuple in
        # self.workers.
        self.workers = []

        # Lists of wids. idle_workers are those that are marked
        # explicitly as idle (don't confuse this with workers that
        # aren't processing a job right now).
        self.idle_workers = []
        self.workers_alive = list(range(workers))

        # We don't need to know *which* worker is currently processing a
        # job. We only need to know how many there are.
        self.jobs_open = 0

        # The public message queue. Workers ask for jobs here, report
        # finished work and log items.
        # Note: There's (at least) two ways to organize a pool like
        # this. One is to only open a pipe to each worker. Then, you can
        # use select() to see which pipe can be read from. Problem is,
        # this is not really supported by the multiprocessing module;
        # you have to dig into the internals and that's ugly. So, we go
        # for another option: A managed queue. Multiple processes can
        # write to it (all the workers do) and the parent can read from
        # it. However, this is only feasible when workers must talk to
        # the parent. The parent can't talk to the workers using this
        # queue. Thus, we still need a dedicated pipe to each worker
        # (see below).
        self.messages = Manager().Queue()

        for wid in range(workers):
            (parent_conn, child_conn) = Pipe()
            proc = Process(target=_worker_process,
                           args=(wid, self.messages, child_conn))
            proc.start()
            self.workers.append((proc, parent_conn))

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Renamed params: `type` previously shadowed the builtin.
        self.shutdown()

    def get_event(self):
        """
        Blocks until a message from a worker is received.

        Forwards worker log entries to LOG; re-raises a worker's task
        exception in the parent as WorkerException.
        """
        msg = self.messages.get()
        if msg['msg'] == 'FINISHED_WORK':
            self.jobs_open -= 1
            # check for exception in child process and raise it
            # here in the parent
            if msg['traceback'] is not None:
                raise WorkerException(
                    msg['exception_task_id'],
                    msg['exception'],
                    msg['traceback'],
                )
        elif msg['msg'] == 'LOG_ENTRY':
            LOG.handle(msg['log_entry'])
        return msg

    def start_task(self, wid, target, task_id=None, args=None, kwargs=None):
        """
        wid         id of the worker to use
        target      any callable (includes bound methods)
        task_id     something to remember this worker by
        args        list of positional arguments passed to target
        kwargs      dictionary of keyword arguments passed to target
        """
        if args is None:
            args = []
        else:
            args = list(args)
        if kwargs is None:
            kwargs = {}

        # Bound methods don't pickle cleanly; ship the instance and the
        # method name separately so the worker can rebind them.
        if ismethod(target):
            target_obj = target.__self__
            target = target.__name__
        else:
            target_obj = None

        (process, pipe) = self.workers[wid]
        pipe.send({
            'msg': 'RUN',
            'task_id': task_id,
            'target': target,
            'target_obj': target_obj,
            'args': args,
            'kwargs': kwargs,
        })

        self.jobs_open += 1

    def mark_idle(self, wid):
        """
        Mark a worker as "idle".
        """
        # We don't really need to do something here. The worker will
        # simply keep blocking at his "pipe.read()". Just store his id
        # so we can answer him later.
        self.idle_workers.append(wid)

    def quit(self, wid):
        """
        Shutdown a worker.
        """
        (process, pipe) = self.workers[wid]
        try:
            pipe.send({'msg': 'DIE'})
        except IOError:
            # Pipe may already be broken if the worker died on its own;
            # still proceed with join/terminate below.
            pass
        pipe.close()
        process.join(JOIN_TIMEOUT)
        if process.is_alive():
            LOG.warn(_(
                "worker process with PID {pid} didn't join "
                "within {time} seconds, terminating...").format(
                    pid=process.pid,
                    time=JOIN_TIMEOUT,
                )
            )
            process.terminate()
        self.workers_alive.remove(wid)

    def shutdown(self):
        """
        Shutdown all workers.
        """
        while self.workers_alive:
            self.quit(self.workers_alive[0])

    def activate_idle_workers(self):
        """
        Tell all idle workers to ask for work again.
        """
        for wid in self.idle_workers:
            # Send a noop to this worker. He will simply ask for new
            # work again.
            (process, pipe) = self.workers[wid]
            pipe.send({'msg': 'NOOP'})
        self.idle_workers = []

    def keep_running(self):
        """
        Returns True if this pool is not ready to die.
        """
        # bool() so callers always get True/False as documented
        # (previously this could return the workers_alive list itself).
        return self.jobs_open > 0 or bool(self.workers_alive)
Example #49
0
class TestVariant(object):
    """One test-run configuration: a set of compile flags plus tag filters.

    Tests execute on worker processes; each worker reports per-test
    results through self.msg_queue, and the main process drains the
    queue and aggregates into self.test_result.
    """

    # NOTE(review): mutable default arguments are shared across calls;
    # safe here only because they are never mutated in place — confirm
    # before editing.
    def __init__(self, name, compile_flags=[], variant_not_tags=[]):
        self.name = name
        self.compile_flags = \
            ['-WERExceptionSupport', '-ExtendedErrorStackForTestHost',
             '-BaselineMode'] + compile_flags
        self._compile_flags_has_expansion = self._has_expansion(compile_flags)
        # `tags` / `not_tags` are module-level filter sets — TODO confirm.
        self.tags = tags.copy()
        self.not_tags = not_tags.union(variant_not_tags).union(
            ['{}_{}'.format(x, name) for x in ('fails','exclude')])

        self.msg_queue = Manager().Queue() # messages from multi processes
        self.test_result = TestResult()
        self.test_count = 0
        self._print_lines = [] # _print lines buffer
        self._last_len = 0

    @staticmethod
    def _has_expansion(flags):
        # True if any flag carries a ${...} placeholder to expand per test.
        # NOTE(review): non-raw regex relies on '\$' surviving as-is; prefer
        # r'...' if this line is ever edited.
        return any(re.match('.*\${.*}', f) for f in flags)

    @staticmethod
    def _expand(flag, test):
        # Substitute ${id} with the test's numeric id.
        return re.sub('\${id}', str(test.id), flag)

    def _expand_compile_flags(self, test):
        # Only pay the per-test expansion cost when a placeholder exists.
        if self._compile_flags_has_expansion:
            return [self._expand(flag, test) for flag in self.compile_flags]
        return self.compile_flags

    # check if this test variant should run a given test
    def _should_test(self, test):
        tags = split_tags(test.get('tags'))
        if not tags.isdisjoint(self.not_tags):
            return False
        if self.tags and not self.tags.issubset(tags):
            return False
        if not_compile_flags: # exclude unsupported compile-flags if any
            flags = test.get('compile-flags')
            if flags and \
                    not not_compile_flags.isdisjoint(flags.lower().split()):
                return False
        return True

    # print output from multi-process run, to be sent with result message
    def _print(self, line):
        self._print_lines.append(str(line))

    # queue a test result from multi-process runs
    def _log_result(self, test, fail):
        output = '\n'.join(self._print_lines) # collect buffered _print output
        self._print_lines = []
        self.msg_queue.put((test.filename, fail, test.elapsed_time, output))

    # (on main process) process one queued message
    def _process_msg(self, msg):
        filename, fail, elapsed_time, output = msg
        self.test_result.log(filename, fail=fail)
        line = '[{}/{} {:4.2f}] {} -> {}'.format(
            self.test_result.total_count(),
            self.test_count,
            elapsed_time,
            'Failed' if fail else 'Passed',
            self._short_name(filename))
        # Pad with spaces to erase the previous (possibly longer) line,
        # then '\r' to overwrite in place unless this was a failure.
        padding = self._last_len - len(line)
        print(line + ' ' * padding, end='\n' if fail else '\r')
        log_message(line)
        self._last_len = len(line) if not fail else 0
        if len(output) > 0:
            print_and_log(output)

    # get a shorter test file path for display only
    def _short_name(self, filename):
        folder = os.path.basename(os.path.dirname(filename))
        return os.path.join(folder, os.path.basename(filename))

    # (on main process) wait and process one queued message
    def _process_one_msg(self):
        self._process_msg(self.msg_queue.get())

    # log a failed test with details
    def _show_failed(self, test, flags, exit_code, output,
                    expected_output=None, timedout=False):
        if timedout:
            if warn_on_timeout:
                self._print('WARNING: Test timed out!')
            else:
                self._print('ERROR: Test timed out!')
        self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
        if expected_output == None or timedout:
            self._print("\nOutput:")
            self._print("----------------------------")
            self._print(output.decode('utf-8'))
            self._print("----------------------------")
        else:
            # Show only the first differing line against the baseline.
            lst_output = output.split(b'\n')
            lst_expected = expected_output.split(b'\n')
            ln = min(len(lst_output), len(lst_expected))
            for i in range(0, ln):
                if lst_output[i] != lst_expected[i]:
                    self._print("Output: (at line " + str(i+1) + ")")
                    self._print("----------------------------")
                    self._print(lst_output[i])
                    self._print("----------------------------")
                    self._print("Expected Output:")
                    self._print("----------------------------")
                    self._print(lst_expected[i])
                    self._print("----------------------------")
                    break

        self._print("exit code: {}".format(exit_code))
        if warn_on_timeout and timedout:
            self._log_result(test, fail=False)
        else:
            self._log_result(test, fail=True)

    # temp: try find real file name on hard drive if case mismatch
    def _check_file(self, folder, filename):
        path = os.path.join(folder, filename)
        if os.path.isfile(path):
            return path     # file exists on disk

        # Case-insensitive fallback scan over the directory entries.
        filename_lower = filename.lower()
        files = os.listdir(folder)
        for i in range(len(files)):
            if files[i].lower() == filename_lower:
                self._print('\nWARNING: {} should be {}\n'.format(
                    path, files[i]))
                return os.path.join(folder, files[i])

        # cann't find the file, just return the path and let it error out
        return path

    # run one test under this variant
    def test_one(self, test):
        try:
            test.start()
            self._run_one_test(test)
        except Exception:
            # Any unexpected error is logged as a test failure with the
            # full traceback in the buffered output.
            test.done()
            self._print(traceback.format_exc())
            self._log_result(test, fail=True)

    # internally perform one test run
    def _run_one_test(self, test):
        folder = test.folder
        js_file = test.filename = self._check_file(folder, test.files)
        js_output = b''

        working_path = os.path.dirname(js_file)

        flags = test.get('compile-flags') or ''
        flags = self._expand_compile_flags(test) + \
                    args.flags.split() + \
                    flags.split()

        # A custom config file replaces all other flags entirely.
        if test.get('custom-config-file') != None:
            flags = ['-CustomConfigFile:' + test.get('custom-config-file')]

        if args.lldb == None:
            cmd = [binary] + flags + [os.path.basename(js_file)]
        else:
            # Drive the binary under lldb via a generated command file.
            # NOTE(review): file opened without `with`; leaks the handle on
            # a write error.
            lldb_file = open(working_path + '/' + os.path.basename(js_file) + '.lldb.cmd', 'w')
            lldb_command = ['run'] + flags + [os.path.basename(js_file)]
            lldb_file.write(' '.join([str(s) for s in lldb_command]));
            lldb_file.close()
            cmd = ['lldb'] + [binary] + ['-s'] + [os.path.basename(js_file) + '.lldb.cmd'] + ['-o bt'] + ['-b']

        test.start()
        proc = SP.Popen(cmd, stdout=SP.PIPE, stderr=SP.STDOUT, cwd=working_path)
        # timeout_data: [process, timed_out_flag]; mutated by the timer
        # callback so the main thread can observe the kill afterwards.
        timeout_data = [proc, False]
        def timeout_func(timeout_data):
            timeout_data[0].kill()
            timeout_data[1] = True
        timeout = test.get('timeout', args.timeout) # test override or default
        timer = Timer(timeout, timeout_func, [timeout_data])
        skip_baseline_match = False
        try:
            timer.start()
            js_output = normalize_new_line(proc.communicate()[0])
            exit_code = proc.wait()
            # if -lldb was set; check if test was crashed before corrupting the output
            search_for = " exited with status = 0 (0x00000000)"
            if args.lldb != None and exit_code == 0 and js_output.index(search_for) > 0:
                js_output = js_output[0:js_output.index(search_for)]
                exit_pos = js_output.rfind('\nProcess ')
                if exit_pos > len(js_output) - 20: # if [Process ????? <seach for>]
                    if 'baseline' not in test:
                        js_output = "pass"
                    else:
                        skip_baseline_match = True
        finally:
            timer.cancel()
        test.done()

        # shared _show_failed args
        fail_args = { 'test': test, 'flags': flags,
                      'exit_code': exit_code, 'output': js_output }

        # check timed out
        if (timeout_data[1]):
            return self._show_failed(timedout=True, **fail_args)

        # check ch failed
        if exit_code != 0:
            return self._show_failed(**fail_args)

        # check output
        if 'baseline' not in test:
            # output lines must be 'pass' or 'passed' or empty
            lines = (line.lower() for line in js_output.split(b'\n'))
            if any(line != b'' and line != b'pass' and line != b'passed'
                    for line in lines):
                return self._show_failed(**fail_args)
        else:
            baseline = test.get('baseline')
            if not skip_baseline_match and baseline:
                # perform baseline comparison
                baseline = self._check_file(folder, baseline)
                with open(baseline, 'rb') as bs_file:
                    baseline_output = bs_file.read()

                # Cleanup carriage return
                # todo: remove carriage return at the end of the line
                #       or better fix ch to output same on all platforms
                expected_output = normalize_new_line(baseline_output)

                if expected_output != js_output:
                    return self._show_failed(
                        expected_output=expected_output, **fail_args)

        # passed
        if verbose:
            self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
        self._log_result(test, fail=False)

    # run tests under this variant, using given multiprocessing Pool
    def _run(self, tests, pool):
        print_and_log('\n############# Starting {} variant #############'\
                        .format(self.name))
        if self.tags:
            print_and_log('  tags: {}'.format(self.tags))
        for x in self.not_tags:
            print_and_log('  exclude: {}'.format(x))
        print_and_log()

        # filter tests to run
        tests = [x for x in tests if self._should_test(x)]
        self.test_count += len(tests)

        # run tests in parallel
        # NOTE(review): `result` is intentionally unused — completion is
        # tracked by draining msg_queue until all results arrive.
        result = pool.map_async(run_one, [(self,test) for test in tests])
        while self.test_result.total_count() != self.test_count:
            self._process_one_msg()

    # print test result summary
    def print_summary(self):
        print_and_log('\n######## Logs for {} variant ########'\
                        .format(self.name))
        for folder, result in sorted(self.test_result.folders.items()):
            print_and_log('{}: {}'.format(folder, result))
        print_and_log("----------------------------")
        print_and_log('Total: {}'.format(self.test_result))

    # run all tests from testLoader
    def run(self, testLoader, pool, sequential_pool):
        # Sequential folders run on a dedicated single-slot pool.
        tests, sequential_tests = [], []
        for folder in testLoader.folders():
            if folder.tags.isdisjoint(self.not_tags):
                dest = tests if not folder.is_sequential else sequential_tests
                dest += folder.tests
        if tests:
            self._run(tests, pool)
        if sequential_tests:
            self._run(sequential_tests, sequential_pool)
Example #50
0
    # Split the work: round-robin the `precision` term indexes across
    # `tasks` buckets so each process gets an (almost) equal share.
    calculation_params = []
    for _ in range(0, tasks):
        calculation_params.append([])
    for index in range(0, precision):
        calculation_params[index % tasks].append(index)

    # One queue slot per task: every process puts exactly one partial result.
    queue = Manager().Queue(maxsize=tasks)
    start_time = timer()
    processes = []
    for thread_index in range(0, len(calculation_params)):
        p = Process(target=calculate, args=(calculation_params[thread_index],
                                            thread_index, queue, quiet))
        processes.append(p)
        p.start()

    # Sum the partial results; queue.get() blocks until each one arrives.
    result = 0
    for _ in range(0, tasks):
        result += queue.get()

    for p in processes:
        p.join()

    # NOTE(review): `time` shadows the stdlib module name if it is imported.
    time = timer() - start_time
    message = 'Using %s tasks\nElapsed time: %s seconds' % (tasks,
                                                            time)
    if not quiet:
        print(message)
    if output_file_name:
        # `unicode` marks this fragment as Python 2; append one timing per run.
        with open(output_file_name, 'a') as file:
            file.write('%s\n' % unicode(str(time), "utf-8"))
Example #51
0
#######################################
# Playback
#######################################
# Index of the audio clip currently being played.
nowplaying = -1

# Process handle of the currently running aplay command, or None.
playing = None

# Read the synthesized audio aloud while monitoring progress.
# NOTE(review): this snippet is truncated after the final `else:` below.
while ( not r.ready() ) or ( nowplaying != len(arg) ) or ( playing is not None ):
	time.sleep(0.5)
	# Check the queue for completion reports from the speech-synthesis workers.
	for _ in range(queue.qsize()):
		compiled_index = queue.get()
		l[compiled_index] = 1
	# Try to start playback if the next clip is ready.
	if nowplaying < len(arg):
		if playing is None:
			if l[nowplaying + 1] == 1:
				# Batch consecutive ready WAV files into one playback call.
				listindex = list()
				while l[nowplaying + 1] == 1:
					nowplaying += 1
					listindex.append(nowplaying)
				_print( "DEBUG: しゃべるよ![%s]" % str(listindex) )
				playing = play_wav(listindex)
			elif l[nowplaying + 1] == 0:
				_print( "DEBUG: 音声合成の完了待ちです!" )
			else:
Example #52
0
class MPResult(object):
    """Synchronize a test-result object between worker processes.

    Result storages are packed into picklable (master_id, message) pairs,
    shipped over a managed queue, and unpacked back into live instances
    on the main process via the MATCH registry.
    """

    # Registry: master-process object id -> local suite/case instance.
    MATCH = {}

    def __init__(self, result):
        from multiprocessing import Manager

        # The wrapped test result instance.
        self.result = result

        # Managed queue carrying packed partial results between processes.
        self.queue = Manager().Queue()

    def __getattr__(self, item):
        # Anything not defined here is delegated to the wrapped result.
        return getattr(self.result, item)

    @staticmethod
    def pack_result_storage(storage):
        """Convert (object, message) entries into picklable (id, message) pairs."""
        packed = []
        for entry in storage:
            packed.append((get_master_id(entry[0]), entry[1]))
        return packed

    def unpack_result_storage(self, storage):
        """Resolve (id, message) pairs back into (instance, message) pairs."""
        return [
            (self.MATCH[master_id], message)
            for master_id, message in storage
        ]

    def match(self, suite):
        """Register *suite* and all nested suites/cases under their master ids."""
        self.MATCH[get_suite_master_id(suite)] = suite

        def walk(node):
            for child in node:
                if isinstance(child, BaseSuite):
                    self.MATCH[get_suite_master_id(child)] = child
                    walk(child)
                else:
                    self.MATCH[get_case_master_id(child)] = child

        walk(suite)

    def save_result(self):
        """Pack the wrapped result's storages and run count onto the queue."""
        payload = (
            (
                self.pack_result_storage(self.result.errors),
                self.pack_result_storage(self.result.skipped),
                self.pack_result_storage(self.result.failures),
            ),
            self.result.testsRun,
        )
        self.queue.put(payload)

    def make_result(self):
        """Drain the queue, merging every partial result into self.result."""
        while not self.queue.empty():
            (errors, skipped, failures), run_tests = self.queue.get()

            self.result.errors.extend(self.unpack_result_storage(errors))
            self.result.skipped.extend(self.unpack_result_storage(skipped))
            self.result.failures.extend(self.unpack_result_storage(failures))

            self.result.testsRun += run_tests
Example #53
0
 '''
 main ,start here
 '''
 # Python 2 script fragment (bare `print` statements); drives a Selenium
 # scraper and fans each color variant out to a pool worker.
 now = time.time()
 amazon = AmazonSpider()
 html = amazon.getHTML(amazon.url)
 time.sleep(5)
 amazon.parseBase(html)
 pool = Pool(5)
 q = Manager().Queue()
 # One async task per color; workers push scraped rows onto q.
 for color_id in amazon.color_list:
     pool.apply_async(start, args = (color_id, amazon.size_list, amazon.url, q, ))
 pool.close()
 pool.join()
 # Drain the queue after all workers finished; get(False) never blocks here.
 while q.qsize() != 0:
     print q.get(False)
 end = time.time()
 print (end-now)
 amazon.driver.quit()
 # for color_id in amazon.color_list:
 #     xpath = "//li[@id='"+color_id+"']"
 #     amazon.driver.find_element_by_xpath(xpath).click()
 #     time.sleep(2)
 #     for size in amazon.size_list:
 #         if size == 'native_size_name_-1':
 #             continue
 #         if 'U' in amazon.driver.find_element_by_xpath("//option[@id='"+size+"']").get_attribute('class'):
 #             continue
 #         else:
 #             amazon.driver.find_element_by_xpath("//option[@id='"+size+"']").click()
 #             time.sleep(2)
Example #54
0
    def launch_expeditions(self, task_request_list, moon_name_list=None):
        """Fan tasks out to Moon workers via a process pool; return replies.

        task_request_list   iterable of task objects (stringified on send)
        moon_name_list      optional list of moon names; defaults to every
                            moon currently on orbit
        """
        global expedition

        # ---[ 1 ]--- resolve which moons we will travel to
        self.log.show('Checking Moon list sent by user')
        if not moon_name_list:
            self.log.show('Traveling to available Moons on Orbit')
            target_moons = self.orbit.values()
        else:
            self.log.show('Traveling to ' + str(moon_name_list))
            target_moons = [self.orbit.get_moon(name) for name in moon_name_list]

        # ---[ 2 ]--- build process-safe queues (unbounded)
        self.log.show('Build Thread-safe Queues with no maximum size')
        recv_queue = Manager().Queue()
        send_queue = Manager().Queue()

        # ---[ 3 ]--- enqueue each task as a plain string (picklable/safe)
        self.log.show('Enqueue tasks on "send_queue" object')
        for task_obj in task_request_list:
            send_queue.put_nowait(str(task_obj))
        self.log.show('send_queue = ' + str(send_queue.qsize()) + '/' + str(len(task_request_list)) + 'tasks')

        # ---[ 4 ]--- one pool worker per target moon
        self.log.show('Starting up Process Pool')
        pool = Pool(processes=len(target_moons))
        for moon in target_moons:
            pool.apply_async(
                func=expedition,
                args=(self.name, moon.name, moon.ip, moon.port, send_queue, recv_queue, ),
            )

        # ---[ 5 ]--- wait for completion, then drain the responses
        pool.close()
        pool.join()

        self.log.show('recv_queue = ' + str(recv_queue.qsize()) + '/' + str(len(task_request_list)) + 'tasks')

        responses = []
        while not recv_queue.empty():
            responses.append(recv_queue.get())

        self.log.show('closing queue')
        self.log.show('return results')
        return responses
Example #55
0
class AudioPlayer(object):
    """ Play audio files.

    """

    def __init__(self, filename: str='', show_position: bool=False, **kwargs):
        """ AudioPlayer(filename='', show_position=False, **kwargs) -> Open
        filename and an appropriate audio io for it.

        """

        self._filename = filename
        self._show_position = show_position

        # Setup the msg_dict for sending messages to the child process.
        # Managed dict shared with the player subprocess (see _play_proc).
        self._msg_dict = Manager().dict()

        # Local (non-shared) mirror of control state, e.g. 'playing'.
        self._control_dict = {}

        # Create a pipe for sending and receiving messages.
        # _control_conn stays with the parent; _player_conn goes to the child.
        self._control_conn, self._player_conn = Pipe()

        # Open the file.
        if filename:
            self.open(filename, **kwargs)

    def __str__(self) -> str:
        """ The information about the open file.

        """

        # Wait for the stream to open.
        # NOTE(review): busy-wait with no timeout — blocks forever if the
        # player process dies before publishing 'info'; consider sleeping.
        while 'info' not in self._msg_dict: pass

        # Return the info string.
        return self._msg_dict.get('info', '')

    def __repr__(self) -> str:
        """ Return a python expression that recreates this instance. """

        return "{}(filename='{}')".format(type(self).__name__, self._filename)

    def __enter__(self):
        """ Enter the context manager; returns self.

        The previous version wrapped `return self` in a try/except, but a
        bare `return self` cannot raise, so the handler was dead code.
        """

        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """ Stop playback and close both pipe ends on context exit.

        Returns True (suppressing the exception) only when playback and
        cleanup succeeded and no exception was raised in the with-body.
        """

        try:
            self.stop()
            for conn in (self._control_conn, self._player_conn):
                conn.close()
        except Exception as err:
            print(err)
            return False
        return not bool(exc_type)

    def __del__(self):
        """ Stop playback before this instance is garbage collected. """

        if not self._control_dict.get('playing', False):
            return
        try:
            self.stop()
        except IOError:
            # The player pipe may already be gone during interpreter teardown.
            pass

    def __len__(self):
        """ Length of the open file, clamped to zero when unknown/negative. """

        if self.length >= 0:
            return self.length
        return 0

    def playing_wrapper(func):
        """ Decorator: only invoke *func* while the stream is playing. """

        @functools_wraps(func)
        def guarded(self, *args, **kwargs):
            """ Call func only when playing; otherwise report and bail out. """

            if self.playing:
                return func(self, *args, **kwargs)

            print("%(filename)s is not playing." % self._msg_dict)
            return None

        return guarded

    def _play_proc(self, msg_dict: dict, pipe: Pipe):
        """ Player process

        Runs in the child process: opens the file, streams it to an audio
        device, honors pause/stop flags in the shared msg_dict, and answers
        position/loop queries arriving on *pipe*.
        """

        # Open the file to play.
        try:
            with open_file(cached=True, **msg_dict) as fileobj:

                # Put the file info in msg_dict.
                msg_dict['info'] = str(fileobj)
                msg_dict['length'] = fileobj.length

                if fileobj._rate < 44100:
                    # if py_imp == 'PyPy':
                    #     blacklist = msg_dict.get('blacklist', [])
                    #     blacklist.append('portaudio')
                    #     msg_dict['blacklist'] = blacklist
                    # else:
                    import audioop

                    # NOTE(review): `audioop` and `state` are only bound when
                    # the file rate is < 44100, but both are used below
                    # whenever device rate != file rate — potential NameError.
                    # msg_dict['rate'] = 44100
                    state = None

                # Open an audio output device that can handle the data
                # from fileobj.
                device = open_device(fileobj, 'w', cached=True, **msg_dict)
                try:

                    # Set the default number of loops to infinite.
                    fileobj.loops = msg_dict.get('loops', -1)

                    # Initialize variable.
                    buf = b'\x00' * device.buffer_size
                    written = 0

                    # Loop until stopped.
                    while msg_dict.get('playing', True):
                        # Stop if the read buffer is empty or player is
                        # not paused.
                        if not (buf or msg_dict.get('paused', False)):
                            break

                        # Print the stream position.
                        if msg_dict.get('show_position', False):
                            # Only print the position if the stream has a
                            # length.
                            if fileobj.length > 0:
                                # Calculate the percentage played.
                                pos = (fileobj.position * 100) / fileobj.length

                                # Make the string.
                                pos_str = 'Position: %.2f%%' % pos

                                # Find the length of the string.
                                format_len = len(pos_str) + 2

                                # Print the string and after erasing the old
                                # one using ansi escapes.
                                if py_imp == 'PyPy':
                                    # Running in pypy which doesn't have the
                                    # flush parameter in the print function.
                                    print('\033[%dD\033[K%s' % (format_len,
                                        pos_str), end='')
                                    sys_stdout.flush()
                                else:
                                    print('\033[%dD\033[K%s' % (format_len,
                                        pos_str), end='', flush=True)

                        # Keep playing if not paused.
                        if not msg_dict.get('paused', False):
                            # Re-open the device after comming out of
                            # paused state.
                            if device.closed:
                                device = open_device(fileobj, 'w', cached=True,
                                                     **msg_dict)

                            # Read the next buffer full of data.
                            try:
                                buf = fileobj.readline()
                            except KeyboardInterrupt:
                                break

                            # if device._rate != fileobj._rate \
                            #         and py_imp != 'PyPy' and fileobj._rate != 0:
                            if device._rate != fileobj._rate \
                                    and fileobj._rate != 0:
                                # Convert the input sample rate to that of
                                # the output device.
                                buf, state = audioop.ratecv(buf,
                                                            fileobj._width,
                                                            fileobj._channels,
                                                            fileobj._rate,
                                                            int(device._rate),
                                                            state)

                            # Filler for end of partial buffer to elminiate
                            # end of audio noise.
                            if type(buf) == bytes:
                                filler = b'\x00' * (device.buffer_size - len(buf))
                            else:
                                filler = ''

                            # Write buf.
                            try:
                                written = device.write(buf + filler)
                            except KeyboardInterrupt:
                                break
                        else:
                            # Close the device when paused and sleep to
                            # open the audio for another process and
                            # save cpu cycles.
                            if not device.closed:
                                device.close()

                            time_sleep(0.05)

                            # Write a buffer of null bytes so the audio
                            # system can keep its buffer full.
                            # device.write(b'\x00' * device.buffer_size)

                        # Get and process any commands from the parent process.
                        if pipe.poll():
                            # Get the data into temp.
                            command = pipe.recv()

                            if 'getposition' in command:
                                pipe.send(fileobj.position)
                            elif 'setposition' in command:
                                fileobj.position = command['setposition']
                            elif 'getloops' in command:
                                pipe.send(fileobj.loops)
                            elif 'setloops' in command:
                                fileobj.loops = command['setloops']
                            elif 'getloopcount' in command:
                                pipe.send(fileobj.loop_count)
                except Exception as err:
                    print(err)
                finally:
                    if not device.closed:
                        device.close()

        except IOError as err:
            # NOTE(review): `sleep` is imported here but never used.
            from time import sleep
            msg_dict['error'] = err
            msg_dict['info'] = ''
            msg_dict['length'] = 0
            print(err)
        finally:
            try:
                # Set playing to False for the parent.
                msg_dict['playing'] = False
            except BrokenPipeError:
                pass

    def open(self, filename: str, **kwargs):
        """ open(filename) -> Open an audio file to play.

        Stops any current playback, then starts the player process in a
        paused state; a later call to play() starts the actual audio.
        """

        # Halt whatever is currently playing.
        self.stop()

        # Remember which file to play next.
        self._filename = filename

        # Wipe the shared message dictionary so stale info from the
        # previous file cannot leak into this one.
        self._msg_dict.clear()

        # Seed it with the fresh playback settings; kwargs may add or
        # override entries, just as before.
        fresh_info = {'show_position': self._show_position,
                      'filename': filename}
        fresh_info.update(kwargs)
        self._msg_dict.update(fresh_info)

        self._control_dict.update(self._msg_dict)

        # Enter the paused state first so the playback process starts
        # silent; a second play() call un-pauses it.
        self.pause()

        # Spawn the playback process (idle because we are paused).
        self.play()

    def play(self):
        """ play() -> Start playback.

        Starts the background player process if nothing is playing,
        otherwise un-pauses a paused player.
        """

        currently_playing = self._msg_dict.get('playing', False)

        if not currently_playing:
            # Tell the child process it should be playing.
            self._msg_dict['playing'] = True

            # Spawn the background process that performs the playback,
            # then start it.
            self._play_p = Process(target=self._play_proc,
                                   args=(self._msg_dict, self._player_conn))
            self._play_p.start()
        elif self._msg_dict.get('paused', True):
            # Already playing but paused: just clear the pause flag.
            self._msg_dict['paused'] = False

        self._control_dict.update(self._msg_dict)

    def stop(self):
        """ stop() -> Stop playback.

        Signals the player process to exit and waits for it.
        """

        if not self._msg_dict.get('playing', False):
            # Nothing is playing; just mirror the current state.
            self._control_dict.update(self._msg_dict)
            return

        # Tell the child process to leave its playback loop.
        self._msg_dict['playing'] = False

        # Block until the playback process has exited.
        self._play_p.join()

        # Clear any leftover paused state.
        self._msg_dict['paused'] = False

        self._control_dict.update(self._msg_dict)

    def pause(self):
        """ pause() -> Pause playback.

        Sets the pause flag read by the player process and mirrors the
        state into the control dictionary.
        """

        self._msg_dict['paused'] = True
        self._control_dict.update(self._msg_dict)

    @property
    def error(self) -> bool:
        """ The last playback error, or False when none occurred.

        The player process stores an exception here when opening or
        playing the file fails.  (Docstring fixed: it previously said
        "True if playing", copied from the playing property.)
        """

        return self._msg_dict.get('error', False)

    @property
    def paused(self) -> bool:
        """ True if playback is paused.

        """

        # Default to un-paused when the flag was never set.
        flag = self._msg_dict.get('paused', False)
        return flag

    @property
    def playing(self) -> bool:
        """ True if playing.

        """

        # Set by play()/stop(); cleared by the child when it exits.
        state = self._msg_dict.get('playing', False)
        return state

    @property
    def length(self) -> int:
        """ Length of audio.

        """

        # Filled in by the playback process; 0 before a file is loaded.
        total = self._msg_dict.get('length', 0)
        return total

    @property
    @playing_wrapper
    def position(self) -> int:
        """ Current position.

        """

        # Round-trip through the control pipe to the player process.
        conn = self._control_conn
        conn.send('getposition')
        return conn.recv()

    @position.setter
    @playing_wrapper
    def position(self, value: int):
        """ Set the current position.

        """

        # Coerce to int before handing it to the player process.
        message = {'setposition': int(value)}
        self._control_conn.send(message)

    @property
    @playing_wrapper
    def loops(self) -> int:
        """ Number of times to loop (playback time + 1).

        """

        # Ask the player process over the control pipe and wait for the
        # reply.
        conn = self._control_conn
        conn.send('getloops')
        return conn.recv()

    @loops.setter
    @playing_wrapper
    def loops(self, value: int):
        """ Number of times to loop (playback time + 1).

        """

        # Coerce to int before handing it to the player process.
        message = {'setloops': int(value)}
        self._control_conn.send(message)

    @property
    @playing_wrapper
    def loop_count(self) -> int:
        """ Number of times the player has looped.

        """

        # Ask the player process and block until it answers.
        conn = self._control_conn
        conn.send('getloopcount')
        return conn.recv()

    @playing_wrapper
    def seek(self, offset: int, whence=SEEK_SET) -> int:
        """ seek(position) -> Seek to position in mod.

        Interprets offset per *whence* (SEEK_SET absolute, SEEK_CUR
        relative to the current position, SEEK_END back from the end)
        and returns the resulting position.
        """

        if whence == SEEK_CUR:
            # Relative to the current position.
            target = self.position + offset
        elif whence == SEEK_END:
            # Measured backwards from the end of the audio.
            target = self.length - offset
        else:
            # SEEK_SET (default): absolute offset.
            target = offset

        self.position = target
        return self.position

    @playing_wrapper
    def tell(self) -> int:
        """ tell -> Returns the current position.

        """

        # Simply delegate to the position property.
        current = self.position
        return current
Example #56
0
class Queue_server(object):
    """ Shared work queue of official-account names plus a list of
    accounts whose crawl failed and may be retried.

    Fix: the original used Python 2 ``print`` statements, which are
    syntax errors under Python 3 — the dialect the rest of this file
    uses.  The free-floating strings before each method were also turned
    into proper docstrings (translated to English).
    """

    def __init__(self, wx_lists=()):
        """ Create the shared queue and fill it with *wx_lists*.

        wx_lists: tuple of official-account names.
        """
        # Unbounded (-1) queue shared across processes.
        self.__queue = Manager().Queue(-1)
        self.init_wx_lists(wx_lists)
        # Shared list of accounts whose crawl failed.
        self.__fail_list = Manager().list()

    def init_wx_lists(self, wx_lists=()):
        """ Enqueue every account name in *wx_lists*. """
        for wx in wx_lists:
            self.put(wx)

    def put(self, value):
        """ Add *value* to the queue. """
        self.__queue.put(value)

    def get(self):
        """ Pop the next element, or return False when the queue is
        empty (note: False, not None — callers test for it). """
        if not self.empty():
            return self.__queue.get()
        return False

    def get_wx_lists_queue(self):
        """ Return the underlying shared queue object. """
        return self.__queue

    def get_size(self):
        """ Return the number of queued elements. """
        return self.__queue.qsize()

    def empty(self):
        """ Return True when the queue is empty. """
        return self.__queue.empty()

    def put_fail_wx(self, wx_data):
        """ Record *wx_data* as a failed account. """
        self.__fail_list.append(wx_data)

    def print_fail_list(self, flush=None):
        """ Print every failed account and push it back onto the queue
        for another try.

        When *flush* is falsy the fail list is reset afterwards; when
        *flush* is truthy and there were no failures, report success.
        """
        if len(self.__fail_list) > 0:
            for fail in self.__fail_list:
                self.put(fail)
                print('the fail wx : {0}'.format(fail))
            if not flush:
                self.__fail_list = Manager().list()
        elif flush:
            print('all success')

    # Did anything fail?
    def is_have_failed(self):
        # Failed accounts are pushed back onto the queue, so a non-empty
        # queue means there are retries (or work) left.
        return not self.empty()
Example #57
0
 def get(self, key):
     """ Fetch *key* through the worker queue and block for the reply.

     A private result queue is sent along with the request so the
     worker process can route the answer back to this caller.
     """
     reply_queue = Manager().Queue()
     self.queue.put(('GET', (key,), reply_queue,))
     return reply_queue.get()
Example #58
0
class Media(object):
    """ Frame source backed by either a movie file (.mp4/.ogg, read with
    OpenCV) or a directory of .jpg images.  Frames are prefetched by a
    child process and handed to the consumer through a shared queue.

    Fixes: openMedia() now acquires the condition *before* starting the
    prefetch process, so the child cannot notify before the parent
    waits (which would hang the parent forever); the bare ``except:`` in
    _prefetch was narrowed to ``except Exception`` so KeyboardInterrupt
    and SystemExit still propagate.
    """

    def __init__(self, movieFile):
        # Path to a movie file or an image directory.
        self._movieFile = movieFile
        # Movie mode when the path looks like a video file; image
        # directory mode otherwise.
        self._isMovie = self._movieFile.endswith(".mp4") or self._movieFile.endswith(".ogg")
        # Blocks openMedia() until the prefetcher publishes the count.
        self._cond = mp.Condition()
        # Bounded queue (100 frames) shared with the prefetch process.
        self._queue = Manager().Queue(100)
        self._totalFrames = mp.Value("i", 0)
        # Set to 1 to tell the prefetch process to stop.
        self._finished = mp.Value("i", 0)

    def _prefetch(self):
        """ Child-process worker: open the media, publish the total
        frame count, then keep the queue topped up until told to stop
        or the media is exhausted.
        """
        self._cond.acquire()
        if self._isMovie:
            self._cap = cv2.VideoCapture(self._movieFile)
        else:
            if not self._movieFile.endswith("/"):
                self._movieFile = self._movieFile + "/"

            self._imgFiles = glob.glob(self._movieFile + "*.jpg")
            self._imgFiles.sort()

        # Publish the count and wake the parent blocked in openMedia().
        self._totalFrames.value = self._getTotalFrames()
        self._cond.notify_all()
        self._cond.release()

        count = 0
        while self._finished.value == 0:
            flag, frame = self._getNextFrame()
            if flag:
                try:
                    self._queue.put(frame, block=True)
                    count += 1
                except Exception:
                    # Best-effort: drop the frame if the put fails (e.g.
                    # the manager is shutting down).  Narrowed from a
                    # bare except so interrupts still propagate.
                    pass
            else:
                break

        self._closeMedia()

    def openMedia(self):
        """ Start the prefetch process and block until it has published
        the total frame count.
        """
        self._cursor = 0
        # Hold the condition before starting the child: _prefetch's own
        # acquire then blocks until our wait() releases the lock, so
        # its notify_all() can never be missed.
        self._cond.acquire()
        mp.Process(target=self._prefetch).start()
        self._cond.wait()
        self._cond.release()

    def getTotalFrames(self):
        """ Total number of frames; valid after openMedia(). """
        return self._totalFrames.value

    def _getTotalFrames(self):
        # Runs in the prefetch process, after the media was opened.
        if self._isMovie:
            return int(self._cap.get(cv2.CAP_PROP_FRAME_COUNT))
        else:
            return len(self._imgFiles)

    def _closeMedia(self):
        # Release the capture handle; image mode needs no cleanup.
        if self._isMovie:
            self._cap.release()

    def getNextFrame(self):
        """ Return (True, frame) from the prefetch queue, or
        (False, None) once every frame has been consumed.
        """
        if self._cursor >= self._totalFrames.value:
            return (False, None)

        self._cursor += 1
        return (True, self._queue.get())

    def _getNextFrame(self):
        # Runs in the prefetch process; reads straight from the media.
        # Note: this reuses self._cursor, but parent and child each hold
        # their own copy of the object, so the counters do not clash.
        if self._isMovie:
            return self._cap.read()
        else:
            if self._cursor < len(self._imgFiles):
                frame = cv2.imread(self._imgFiles[self._cursor])
                self._cursor += 1
                return (True, frame)
            else:
                return (False, None)

    def closeMedia(self):
        """ Signal the prefetch process to stop and clean up. """
        with self._finished.get_lock():
            self._finished.value = 1
def search(output_dict, rules_file):
    """ Scan the tagged corpus pickles under corpus/tagged for
    hypernym/hyponym pairs using the textual rules in *rules_file*, and
    pickle the resulting {hypernym: [hyponyms]} dict to *output_dict*.

    output_dict: path of the output pickle file.
    rules_file:  path of a pickle holding rules as ' | '-separated
                 strings with 'HYPERNYM' at one end and 'NP'/'NPs' at
                 the other.

    Fixes: in the left-side rule branch, a mismatching middle word now
    sets error = True (it previously set error = False, so left-side
    rules matched regardless of the middle words), and the target chunk
    type is taken from rule[0] (rule[-1] is 'HYPERNYM' in that branch,
    so the old comparisons could never be true).  File handles are now
    closed via context managers.
    """
    with open(rules_file, 'rb') as rules_fh:
        rules = [rule.split(' | ') for rule in pickle.load(rules_fh)]

    file_list = JoinableQueue()
    word_dict = Manager().dict()

    # Queue every tagged-corpus pickle below corpus/tagged.
    for root, subFolders, files in os.walk(os.path.join(os.path.dirname(__file__), 'corpus', 'tagged')):
        for current_file in files:
            if current_file.endswith(".pickle"):
                file_list.put(os.path.join(root, current_file))

    file_count = file_list.qsize()

    def worker():
        """ Consume files from file_list, applying every rule to each NP
        chunk and recording matches in the shared word_dict. """

        def rule_parser(tagged_data):
            # Chunk single nouns as NP and noun enumerations as NPs.
            parser = nltk.RegexpParser('''
                NP:   {<NN|NNS|NNP|NNPS|NE>}
                NPs:  {<NP> (<,|CC> <NP>)+}
            ''')

            return parser.parse(tagged_data)

        def get_nltk_word(data):
            # Unwrap the word from either a Tree node or a (word, tag)
            # tuple.
            if isinstance(data, nltk.tree.Tree):
                if isinstance(data[0], tuple):
                    return data[0][0]
                else:
                    return data[0]
            else:
                return data[0]

        def add_to_dict(hypernym, hyponym):
            # Skip hyponyms that are already recorded as hypernym keys
            # (presumably to avoid cycles — preserved as-is).
            if not hyponym in word_dict.keys():
                old_list = word_dict.get(hypernym)

                if not old_list:
                    old_list = [hyponym]
                else:
                    if not hyponym in old_list:
                        old_list.append(hyponym)

                # Manager dicts don't observe in-place list mutation;
                # the whole value must be reassigned.
                word_dict[hypernym] = old_list

        def apply_rules(data, position):
            for rule in rules:
                # Hypernym on the left: pattern words then NP/NPs chunk
                # to the right of *position*.
                if rule[0] == 'HYPERNYM':
                    possible_hypernym = get_nltk_word(data[position])
                    error = False
                    word_count = 1

                    for word in rule[1:-1]:
                        try:
                            if word != get_nltk_word(data[position + word_count]):
                                error = True

                            word_count += 1
                        except IndexError:
                            pass

                    try:
                        if not error:
                            if isinstance(data[position + word_count], nltk.tree.Tree):
                                if data[position + word_count].node == 'NP' and rule[-1] == 'NP':
                                    add_to_dict(possible_hypernym, data[position + word_count][0][0])
                                    break
                                elif data[position + word_count].node == 'NPs' and rule[-1] == 'NPs':
                                    for node in data[position + word_count]:
                                        if isinstance(node, nltk.tree.Tree):
                                            add_to_dict(possible_hypernym, node[0][0])
                                            break
                    except IndexError:
                        pass

                # Hypernym on the right: pattern is scanned leftwards.
                elif rule[-1] == 'HYPERNYM':
                    possible_hypernym = get_nltk_word(data[position])
                    error = False
                    word_count = -1
                    nrule = list(rule)
                    nrule.reverse()

                    for word in nrule[1:-1]:
                        try:
                            if word != get_nltk_word(data[position + word_count]):
                                # BUG FIX: was `error = False`, which let
                                # left-side rules match regardless of the
                                # middle words.
                                error = True

                            word_count -= 1
                        except IndexError:
                            pass

                    try:
                        if not error:
                            if isinstance(data[position + word_count], nltk.tree.Tree):
                                # BUG FIX: the target chunk type of a
                                # left-side rule is rule[0]; rule[-1] is
                                # 'HYPERNYM' here, so the old comparison
                                # was dead code.
                                if data[position + word_count].node == 'NP' and rule[0] == 'NP':
                                    add_to_dict(possible_hypernym, data[position + word_count][0][0])
                                    break
                                elif data[position + word_count].node == 'NPs' and rule[0] == 'NPs':
                                    for node in data[position + word_count]:
                                        if isinstance(node, nltk.tree.Tree):
                                            add_to_dict(possible_hypernym, node[0][0])
                                            break
                    except IndexError:
                        pass

        while not file_list.empty():
            input_file = file_list.get()

            with open(input_file, 'rb') as data_fh:
                tagged_data = rule_parser(pickle.load(data_fh))

            for n in range(len(tagged_data)):
                if isinstance(tagged_data[n], nltk.tree.Tree):
                    if tagged_data[n].node == 'NP':
                        apply_rules(tagged_data, n)

            # Progress: fraction of queued files already consumed.
            percentage = 100.0 - ((float(file_list.qsize()) / float(file_count)) * 100.0)
            sys.stdout.write("\rProgress: {0:.2f}%".format(percentage))
            sys.stdout.flush()

            file_list.task_done()

    sys.stdout.write("\rProgress: 0.00%")

    # Fan out across 8 daemon worker processes.
    for pid in range(8):
        process = Process(target=worker, args=())
        process.daemon = True
        process.start()

    file_list.join()
    print('')

    # Copy the manager proxy dict into a plain dict for pickling.
    pickle_dict = dict()

    for key in word_dict.keys():
        pickle_dict[key] = word_dict.get(key)

    with open(output_dict, 'wb+') as out_fh:
        pickle.dump(pickle_dict, out_fh, 2)