.replace("ENDBITWIDTH",str(interval_end))       \
                        .replace("PLATFORM",options.platform_name)

    os.system(("(cd PATH; cd PLATFORM; mkdir DIRNAME; cp STD_MICROBENCHPYNAME DIRNAME/MICROBENCHPYNAME)")\
                            .replace("PATH", pathHead)                                      \
                            .replace("PLATFORM",options.platform_name)                      \
                            .replace("DIRNAME",dirName)                                     \
                            .replace("STD_MICROBENCHPYNAME",std_microBenchPyName)           \
                            .replace("MICROBENCHPYNAME",microBenchPyName))
    scripts_ForMicroBenchmarks.append(pathHead+"/"+options.platform_name+"/"+dirName+"/"+microBenchPyName)
    results_ForMicroBenchmarks.append(pathHead+"/"+options.platform_name+"/"+dirName+"/overview")
    interval_head += intervalLen 

processes_ForMicroBenchmarks = []
for script in scripts_ForMicroBenchmarks:
    processes_ForMicroBenchmarks.append(Process(target=runMicroBenchmarks,args=(script,)))

for tmp_proc in processes_ForMicroBenchmarks:
    tmp_proc.start()
    
for tmp_proc in processes_ForMicroBenchmarks:
    tmp_proc.join()

resultDataStrs = []
for result_file_name in results_ForMicroBenchmarks:
    subResultFile = open(result_file_name, 'r')
    resultDataStrs = resultDataStrs + subResultFile.readlines()
    subResultFile.close()

resultFile = open(pathHead+"/"+instName, 'w')
for data in resultDataStrs:
    resultFile.write(data)
resultFile.close()
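The runMicroBenchmarks target handed to each Process is not part of this fragment. A minimal sketch, assuming it simply executes the generated benchmark script from inside its own directory so the "overview" result file lands next to it:

import os
import subprocess

def runMicroBenchmarks(script):
    # Hypothetical helper: run one generated micro-benchmark script in its directory.
    work_dir = os.path.dirname(script)
    subprocess.call(["python", os.path.basename(script)], cwd=work_dir)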
Example #2
def get(url):
    start = time.time()
    try:
        requests.get(url)
    except:
        ok = 0
    else:
        ok = 1
    finally:
        rt = time.time() - start
        return {'ok': ok, 'rt': rt}


while True:
    if task.empty():
        print 'no task yet, wait 5s...'
        time.sleep(5)
        continue
    try:
        i = task.get(timeout=10)
        '''
        Here we parse the url, look up the product, and put it into redis
        '''
        p = Process(target=deal_url, args=(format(i), ))
        p.start()
        p.join()
        o = get(i)
        # output the result to the result queue
        result.put({'i': i, 'o': o})
    except Exception, e:
        print 'Error: {0}'.format(e)
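deal_url is not included in this fragment. A minimal sketch, assuming it pulls a product id out of the url and caches it in Redis via redis-py (the key layout here is hypothetical):

import redis

def deal_url(url):
    # Hypothetical: extract a product id from the url and store it in Redis.
    r = redis.Redis(host='localhost', port=6379, db=0)
    product_id = url.rstrip('/').split('/')[-1]
    r.sadd('products', product_id)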
import random
import string
import time
from multiprocessing import Process

import LedgerCompliance.client  # assumed import for the client SDK used below; API, HOST, PORT are expected to be defined elsewhere


def get_random_string(length):
    letters = string.ascii_lowercase
    ret = ''.join(random.choice(letters) for i in range(length))
    return ret

def worker(rep):
	cli=LedgerCompliance.client.Client(API, HOST, PORT)
	cli.connect()
	for i in range(0,rep):
		msg_key="KEY_"+get_random_string(8)
		msg_val="VALUE_"+get_random_string(random.randint(50,1000))
		cli.safeSet(msg_key.encode(),msg_val.encode())
		res=cli.safeGet(msg_key.encode())
		assert res.verified

proclist=[]
for i in range(0,4):
	proclist.append(Process(target=worker, args=(100,)))
t0=time.time()
for p in proclist:    
	p.start()
for p in proclist:
	p.join()
t1=time.time()
print("Run time: {} seconds".format(t1-t0))
		self.channel = self.conn.channel()
		self.channel.exchange_declare(exchange_name, 'topic')
		queueId = self.channel.queue_declare( exclusive = True ).queue
		self.channel.queue_bind(queueId, exchange_name, self.routingKey)
		try:
		    self.channel.basic_consume(queueId, callback=self.callback_rdm)
		except KeyboardInterrupt:
		    self.channel.close()
		    self.conn.close()

		while True:
			self.conn.drain_events()


	def callback_rdm(self, message):
			#print("Body:'%s', Proeprties:'%s', DeliveryInfo:'%s'" % ( message.body, message.properties, message.delivery_info))
			print message.body
			#message.ack()
			#channel.basic_publish(message.body, exchange_name, arg_rky)

	def close(self):
		self.conn.close()

def processStart(rkey):
	StateService(rkey).run()

if __name__ == '__main__':
	s = r.grp['state']
	for x in s:
		p = Process(target=processStart, args=(x,))
		p.start()
		#p.join()
Example #5
    print(f'Result of {n} square : {result}')


# Main
if __name__ == '__main__':
    # Parent process ID
    parent_process_id = os.getpid()

    # Print
    print(f'Parent Process ID : {parent_process_id}')

    # Declare the process list
    processes = list()

    # Create and run the processes
    for i in range(1, 10):
        # Create
        p = Process(name=str(i), target=square, args=(i, ))

        # Add to the list
        processes.append(p)

        # Start
        p.start()

    for process in processes:
        process.join()

    # Done
    print('Main-Processing Done!')
Example #6
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages',
                        '-dna',
                        action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=2)
    parser.add_argument('--size', '-s', type=int, default=64)
    args = parser.parse_args()

    if not (os.path.exists('data')):
        os.makedirs('data')
    logdir = args.exp_name + '_' + args.env_name + \
        '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    if not (os.path.exists(logdir)):
        os.makedirs(logdir)

    max_path_length = args.ep_len if args.ep_len > 0 else None

    processes = []

    for e in range(args.n_experiments):
        seed = args.seed + 10 * e
        print('Running experiment with seed %d' % seed)

        def train_func():
            train_PG(exp_name=args.exp_name,
                     env_name=args.env_name,
                     n_iter=args.n_iter,
                     gamma=args.discount,
                     min_timesteps_per_batch=args.batch_size,
                     max_path_length=max_path_length,
                     learning_rate=args.learning_rate,
                     reward_to_go=args.reward_to_go,
                     animate=args.render,
                     logdir=os.path.join(logdir, '%d' % seed),
                     normalize_advantages=not (args.dont_normalize_advantages),
                     nn_baseline=args.nn_baseline,
                     seed=seed,
                     n_layers=args.n_layers,
                     size=args.size)

        # # Awkward hacky process runs, because Tensorflow does not like
        # # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        processes.append(p)
        # if you comment in the line below, then the loop will block
        # until this process finishes
        # p.join()

    for p in processes:
        p.join()
Example #7
def executedcommand(stack, rstack, lstack, com, opr, pc, pre, top, rtop, ltop,
                    address, value, tablecount, variable_region, lock,
                    process_number, process_path, count_pc, process_count,
                    terminate_flag, flag_number):
    if com[pc] == 1:  # push: push an immediate value onto the operand stack
        top = push(opr[pc], stack, top)
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 2:  # load: read a value from the shared-variable store and push it onto the operand stack
        value.acquire()
        c = value[search_table(opr[pc], process_path)]
        value.release()
        top = push(c, stack, top)
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 3:  # store: update the variable and push its previous value, the process number and the path onto the value stack
        value.acquire()
        with open("value_stack.txt", 'a') as f:
            f.write(
                str(value[search_table(opr[pc], process_path)]) + ' ' +
                str(process_number) + '.' + process_path + '\n')
        f.close()
        rtop.value = rtop.value + 2
        (value[search_table(opr[pc], process_path)], top) = pop1(stack, top)
        value.release()
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 4:  # jpc: pop the top of the stack and, if the value is 1, jump to the operand address
        (c, top) = pop1(stack, top)
        if c == 1:
            pre = pc
            pc = opr[pc] - 2
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 5:  # jmp: unconditionally jump to the operand address
        pre = pc
        pc = opr[pc] - 2
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 6:  # op: perform the operation selected by the operand
        if (opr[pc]) == 0:  #'+'
            (c, top) = pop1(stack, top)
            (d, top) = pop1(stack, top)
            top = push(c + d, stack, top)
        elif (opr[pc]) == 1:  #'*'
            (c, top) = pop1(stack, top)
            (d, top) = pop1(stack, top)
            top = push(c * d, stack, top)
        elif opr[pc] == 2:  #'-'
            (c, top) = pop1(stack, top)
            (d, top) = pop1(stack, top)
            top = push(d - c, stack, top)
        elif opr[pc] == 3:  #'>'
            (c, top) = pop1(stack, top)
            (d, top) = pop1(stack, top)
            if d > c:
                top = push(1, stack, top)
            else:
                top = push(0, stack, top)
        elif opr[pc] == 4:  #'=='
            (c, top) = pop1(stack, top)
            (d, top) = pop1(stack, top)
            if d == c:
                top = push(1, stack, top)
            else:
                top = push(0, stack, top)
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 7:  # label: push the pre-jump PC value and the process number onto the label stack
        if args[2] == 'f':
            with open("label_stack.txt", 'a') as f:
                f.write(str(pre + 1) + ' ' + str(process_number) + '\n')
            f.close()
            ltop.value = ltop.value + 2
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 21:  # rjmp: pop a value from the label stack and jump to that PC
        a = count_pc - int(lstack[ltop.value])
        ltop.value = ltop.value - 2
        pre = pc
        return (a, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 22:  # restore: pop a value from the value stack and store it back into the shared-variable store
        s2 = re.search(r'([a-z]\d+\.)+', rstack[rtop.value + 1])
        process_path = s2.group() + ".E"
        value[search_table(opr[pc], process_path)] = int(rstack[rtop.value])
        rtop.value = rtop.value - 2
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 19 or com[pc] == 28:  #nop no operation
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 8 or com[pc] == 23:  # par: marks the start and end of a parallel block
        #if opr[pc]==1:
        #    terminate_flag[flag_number]=1
        #    print(str(flag_number))
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 9:  # alloc: allocate an address for a new variable and initialize it to 0
        #top=push(0,stack,top)
        if args[2] == 'f':
            value[tablecount.value] = 0
            variable_region.append(0)
            with open("variable_table.txt", 'a') as f:
                f.write(str(opr[pc]) + '.' + process_path + '      0\n')
            tablecount.value = tablecount.value + 1
        elif args[2] == 'b':
            variable_path = search_table(opr[pc], process_path)
            variable_region.append(0)
            with open("variable_table.txt", 'r') as f:
                variable_table = f.read().split('\n')
            s = re.search(r'\s(-)?(\d+)', variable_table[variable_path])
            variable_value = int(s.group().strip(' '))
            value[search_table(opr[pc], process_path)] = variable_value
            tablecount.value = tablecount.value + 1
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 10:  # free: release the variable's address and save its last value onto the value stack
        table_address = search_table(opr[pc], process_path)
        value.acquire()
        with open("value_stack.txt", 'a') as f:
            f.write(
                str(value[search_table(opr[pc], process_path)]) + ' ' +
                str(process_number) + '.' + process_path + '\n')
        f.close()
        value.release()
        value[table_address] = 0
        pre = pc
        variable_region[opr[pc]] = value[opr[pc]]
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 11:  # proc: start of a procedure; performs label and block and pushes the return address onto the operand stack
        if args[2] == 'f':
            with open("label_stack.txt", 'a') as f:
                f.write(str(pre + 1) + ' ' + str(process_number) + '\n')
            f.close()
            push(pre + 1, stack, top)
        pre = pc
        process_path = 'p' + str(opr[pc]) + '.' + process_path
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 12:  # ret: end of a procedure; pop the return address from the operand stack and jump to it
        (c, top) = pop1(stack, top)
        for i in range(0, len(process_path), 1):
            if process_path[i] == '.':
                process_path = process_path[i + 1:len(process_path)]
                break
        pre = pc
        return (c, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 13:  # block: extend the path
        if com[pc + 3] == 16 and (com[pc + 1] == 5 or com[pc + 1] == 8):
            process_path = 'c' + str(opr[pc]) + '.' + process_path
        else:
            process_path = 'b' + str(opr[pc]) + '.' + process_path
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 14:  # end: strip one level from the path
        for i in range(0, len(process_path), 1):
            if process_path[i] == '.':
                process_path = process_path[i + 1:len(process_path)]
                break
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 15:  # fork: spawn parallel processes
        lock.release()
        process = {}
        start_process_count = process_count.value
        already_terminate = {}
        f = open('a' + (str)(opr[pc]) + '.txt', mode='r')
        tables = f.read()
        # Read the parallel-block table, extract each start/end address pair and spawn a process for it
        for i in range(0, len(tables), 10):
            t1 = tables[i:i + 4]
            s1 = re.search(r'\d+', t1)
            t2 = tables[i + 5:i + 9]
            s2 = re.search(r'\d+', t2)
            terminate_flag[process_count.value] = 0
            process[process_count.value] = Process(
                target=execution,
                args=(com, opr, (int)(s1.group()), (int)(s2.group()), count_pc,
                      stack, address, value, tablecount, rstack, lstack, rtop,
                      ltop, 0, variable_region, lock, process_number + '.' +
                      str(process_count.value - start_process_count + 1),
                      process_path, process_count, terminate_flag,
                      process_count.value))
            process_count.value = process_count.value + 1
        end_process_count = process_count.value
        for i in range(start_process_count, process_count.value, 1):
            process[i].start()
        terminate_count = 0
        # Watch whether the processes spawned here have finished; once one has, shut it down completely with terminate()
        for i in range(0, 100, 1):
            already_terminate[i] = 0
        while True:
            for i in range(start_process_count, end_process_count, 1):
                if terminate_flag[i] == 1 and already_terminate[i] == 0:
                    process[i].terminate()
                    process[i].join()
                    already_terminate[i] = 1
                    terminate_count = terminate_count + 1
                    if not process[i].is_alive():
                        process[i].join()
            if terminate_count == end_process_count - start_process_count:
                pre = pc
                lock.acquire()
                return (int(s2.group()), pre, stack, top, rtop, tablecount,
                        process_path)
        pre = pc
        lock.acquire()
        return (a, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 16:  # merge: marks the end of a parallel block
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 17:  # func: start of a function; performs label and block and pushes the return address just below the top of the operand stack
        if args[2] == 'f':
            with open("label_stack.txt", 'a') as f:
                f.write(str(pre + 1) + ' ' + str(process_number) + '\n')
            f.close()
            (c, top) = pop1(stack, top)
            push(pre + 1, stack, top)
            push(c, stack, top)
        pre = pc
        process_path = 'f' + str(opr[pc]) + '.' + process_path
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 18:  # f_return: end of a function; pop the value just below the top of the operand stack and jump to that address
        (d, top) = pop1(stack, top)
        (c, top) = pop1(stack, top)
        push(d, stack, top)
        for i in range(0, len(process_path), 1):
            if process_path[i] == '.':
                process_path = process_path[i + 1:len(process_path)]
                break
        pre = pc
        return (c, pre, stack, top, rtop, tablecount, process_path)
        #elif com[pc]==19:  # w_label: marks the start of a while; performs label and block
        #with open("label_stack.txt",'a') as f:
        #    f.write(str(pre+1)+' '+str(process_number)+'\n')
        #f.close()
        #process_path='w'+str(opr[pc])+'.'+process_path
        #pre=pc
        #return (pc+1,pre,stack,top,rtop,tablecount,process_path)
        #elif com[pc]==20:  # w_end: marks the end of a while; performs label and end
        #with open("label_stack.txt",'a') as f:
        #    f.write(str(pre+1)+' '+str(process_number)+'\n')
        #f.close()
        #process_path=process_path.replace('w'+str(opr[pc])+'.','')
        #return (pc+1,pre,stack,top,rtop,tablecount,process_path)
    elif com[pc] == 24:  # r_alloc: reverse alloc; pop a value from the value stack and store it at the newly allocated shared-variable address
        s2 = re.search(r'([a-z]\d+\.)+', rstack[rtop.value + 1])
        process_path = s2.group()
        with open("variable_table.txt", 'a') as f:
            f.write(str(opr[pc]) + '.' + process_path + 'E      0\n')
        value[tablecount.value] = int(rstack[rtop.value])
        tablecount.value = tablecount.value + 1
        rtop.value = rtop.value - 2
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 25:  # r_free: reverse free; marks the release of a variable
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 26:  # r_fork: reverse fork; reads the parallel table in the opposite order from the forward pass
        lock.release()
        process = {}
        start_process_count = process_count.value
        f = open('a' + (str)(opr[pc]) + '.txt', mode='r')
        already_terminate = {}
        tables = f.read()
        tables_process_number = int(len(tables) / 10)
        print(str(tables_process_number))
        for i in range(0, len(tables), 10):
            t1 = tables[i:i + 4]
            s1 = re.search(r'\d+', t1)
            t2 = tables[i + 5:i + 9]
            s2 = re.search(r'\d+', t2)
            terminate_flag[process_count.value] = 0
            process[process_count.value] = Process(
                target=execution,
                args=(com, opr, count_pc - (int)(s2.group()) + 1,
                      count_pc - (int)(s1.group()) + 1, count_pc, stack,
                      address, value, tablecount, rstack, lstack, rtop, ltop,
                      0, variable_region, lock, process_number + '.' +
                      str(process_count.value - start_process_count + 1),
                      process_path, process_count, terminate_flag,
                      process_count.value))
            process_count.value = process_count.value + 1
        end_process_count = process_count.value
        for i in range(start_process_count, process_count.value, 1):
            process[i].start()
        terminate_count = 0
        for i in range(0, 100, 1):
            already_terminate[i] = 0
        t3 = tables[0:0 + 4]
        s3 = re.search(r'\d+', t3)
        while True:
            for i in range(start_process_count, end_process_count, 1):
                if terminate_flag[i] == 1 and already_terminate[i] == 0:
                    process[i].terminate()
                    process[i].join()
                    already_terminate[i] = 1
                    terminate_count = terminate_count + 1
                    if not process[i].is_alive():
                        process[i].join()
            if terminate_count == end_process_count - start_process_count:
                pre = pc
                lock.acquire()
                return (count_pc - int(s3.group()) + 1, pre, stack, top, rtop,
                        tablecount, process_path)
        for i in range(start_process_count, process_count.value, 1):
            process[i].join()
        a = count_pc - int(s3.group())
        pre = pc
        lock.acquire()
        return (a, pre, stack, top, rtop, tablecount, process_path)
    elif com[pc] == 27:  # r_merge: reverse merge
        pre = pc
        return (pc + 1, pre, stack, top, rtop, tablecount, process_path)
def compute_ml_data(filename, mode, previous = None):
    print "Compute regression data: {}, {}, {}".format(filename, mode, previous)
    df = pd.read_pickle(filename + "_post")
    print "Loaded df {}".format(df.shape)

    # ml_data = cPickle.load(open("ml_dataset"))
    #truth_dict, truth_set = load_truth_set()
    #positive_negative = cPickle.load(open("positive_negative.out"))

    if mode == "train":
        truth_dict, truth_set = load_truth_set()
    
        positive_set = cPickle.load(open("blocking_positive_set_{}".format(mode)))
        
        # Add all available training sets !
        positive_set |= truth_set

        positive_list = [((int(p[0]), int(p[1])), 1) for p in positive_set]

        negative_set = cPickle.load(open("blocking_negative_set_{}".format(mode)))
        negative_list = [((int(p[0]), int(p[1])), 0) for p in negative_set]
        
        new_negatives = min(len(negative_list), 20 * len(positive_list))

        print "Undersampling negatives: {} -> {}".format(len(negative_list), new_negatives)
        negative_list = rnd.sample(negative_list, new_negatives)

        assert not (negative_set & positive_set)

        print "Train: {} {}".format(len(positive_list), len(negative_list))

        positive_negative = positive_list + negative_list

        del positive_set
        del negative_set
        del positive_list
        del negative_list
        del truth_set
        del truth_dict
    else:
        total_set = cPickle.load(open("blocking_total_set_test"))

        if previous:
            prev_set = cPickle.load(open(previous[0]))
            print "Previous blocking_set defined : {}".format(prev_set)
            total_set = total_set - prev_set

        total_list = [((int(p[0]), int(p[1])), 2) for p in total_set]
        positive_negative = total_list

        print "Test: {}".format(len(total_list))

    #total_token_list = [t for el in df.norm_name for t in el.split() if len(t) >= 4]
    #rarity_counter = Counter(total_token_list)

    print "Garbage collection: {}".format(gc.collect())

    rnd.shuffle(positive_negative)

    #print positive_negative
    #n_cores = cpu_count()
    n_cores = 3
    n_list = len(positive_negative)
    n_block = n_list / n_cores + 1

    jobs = []
    data = []
    
    for i in range(n_cores):
        start_id = i * n_block
        end_id = min (n_list, start_id + n_block)
        jobs.append(Process(target = bulk_process, args = (df, positive_negative[start_id:end_id], "job_{}".format(i))))

    for j in jobs:
        j.start()

    for j in jobs:
        j.join()    
    
    print "Sleeping for 2 seconds"
    time.sleep(2)    

    for i in range(n_cores):
        print "Processing file {}".format(i)
        tmp_filename = "bulk_process_{}_tmp".format("job_{}".format(i))
        data_tmp = cPickle.load(open(tmp_filename, 'rb'))
        system("rm -f {}".format(tmp_filename))
        data.extend(data_tmp)

    print "data length: {}".format(len(data))

    ml_df = pd.DataFrame(data, columns = column_features())
    if previous:
        print "Previous df defined : {}".format(previous[1])
        prev_df = pd.read_pickle(previous[1])
        ml_df = pd.concat([ml_df, prev_df])


    #print "Computing feature for {} pairs".format(len(positive_negative))

    #data = Parallel(n_jobs=10)(delayed(compute_features)(df.loc[int(pair[0])], df.loc[int(pair[1])], index, pair, label) for index, (pair, label) in enumerate(positive_negative))

    ml_df.to_pickle("ml_{}_set".format(mode))
def main(use_multiprocessing=False, log_level=None):
    """ 
        Create and run a tello controller :
        1) get the video stream from the tello
        2) wait for keyboard commands to pilot the tello
        3) optionnally, process the video frames to track a body and pilot the tello accordingly.

        If use_multiprocessing is True, the parent process creates a child process ('worker')
        and the workload is shared between the 2 processes.
        The parent process job is to:
        - get the video stream from the tello and displays it in an OpenCV window,
        - write each frame in shared memory at destination of the child, 
        each frame replacing the previous one (more efficient than a pipe or a queue),
        - read potential command from the child (currently, only one command:EXIT).
        Commands are transmitted by a Pipe.
        The child process is responsible of all the others tasks:
        - process the frames read in shared memory (openpose, write_hud),
        - if enable, do the tracking (calculate drone commands from position of body),
        - read keyboard commands,
        - transmit commands (from tracking or from keyboard) to the tello, and receive message from the tello.

    """
    global tello
    
    if use_multiprocessing:
        # Create the pipe for the communication between the 2 processes
        parent_cnx, child_cnx = Pipe()
    else:
        child_cnx = None

    tello = TelloController(use_face_tracking=False, 
                            kbd_layout="QWERTY", 
                            write_log_data=False, 
                            log_level=log_level, child_cnx=child_cnx)
   
    first_frame = True  
    frame_skip = 300

    for frame in tello.container.decode(video=0):
        if 0 < frame_skip:
            frame_skip = frame_skip - 1
            continue
        start_time = time.time()
        if frame.time_base < 1.0/60:
            time_base = 1.0/60
        else:
            time_base = frame.time_base

        
        # Convert frame to cv2 image
        frame = cv2.cvtColor(np.array(frame.to_image(),dtype=np.uint8), cv2.COLOR_RGB2BGR)
        frame = cv2.resize(frame, (640,480))
        if use_multiprocessing:
            
            if first_frame:
                
                # Create the shared memory to share the current frame decoded by the parent process 
                # and given to the child process for further processing (openpose, write_hud,...)
                frame_as_ctypes = np.ctypeslib.as_ctypes(frame)
                tello.shared_array = sharedctypes.RawArray(frame_as_ctypes._type_, frame_as_ctypes)
                tello.frame_shape = frame.shape
                first_frame = False
                # Launch process child
                p_worker = Process(target=openpose_worker)
                p_worker.start()
            # Write the current frame in shared memory
            tello.shared_array[:] = np.ctypeslib.as_ctypes(frame.copy())
            # Check if there is some message from the child
            if parent_cnx.poll():
                msg = parent_cnx.recv()
                if msg == "EXIT":
                    print("MAIN EXIT")
                    p_worker.join()
                    tello.drone.quit()
                    cv2.destroyAllWindows()
                    exit(0)
        else:
            frame = tello.process_frame(frame)
            tello.sound_player.play()

        if not use_multiprocessing: tello.fps.update()

        # Display the frame
        cv2.imshow('Tello', frame)

        cv2.waitKey(1)

        frame_skip = int((time.time() - start_time)/time_base)
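openpose_worker is not part of this fragment. A minimal sketch of the child side, assuming it reads the latest frame from the shared RawArray, processes it, and reports EXIT back over the Pipe (process_frame is taken from the snippet above; child_cnx storage and the exit flag are assumptions):

def openpose_worker():
    # Child process: consume the most recent frame written by the parent into shared memory.
    while True:
        frame = np.ctypeslib.as_array(tello.shared_array)
        frame = np.array(frame, dtype=np.uint8).reshape(tello.frame_shape)
        tello.process_frame(frame)       # openpose, write_hud, tracking, ...
        if tello.exit_requested:         # hypothetical flag set by keyboard handling
            tello.child_cnx.send("EXIT") # child_cnx assumed to be stored on the controller
            break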
    def test_read(self):
        ##
        ## run service
        ##
        checker_service_process = Process(target=vote_contract.run_checker_service)
        checker_service_process.start()
        time.sleep(0.1)

        ##
        ## create transaction
        ##
        # number of voters and values
        options = ['alice', 'bob', 'sally']
        num_voters = 50
        values = [[1, 0, 0] for _ in range(0, num_voters)]

        # create keys and participants
        params = setup()
        (tally_priv, tally_pub) = key_gen(params)
        keys = [key_gen(params) for _ in range(0, num_voters)]
        participants = [pack(pub) for (_, pub) in keys]

        # get init token
        init_transaction = vote.init()

        # get initial scores
        create_vote_transaction = vote.create_vote(
            (init_transaction['transaction']['outputs'][0],),
            None,
            None,
            dumps(options),
            dumps(participants),
            pack(tally_priv),
            pack(tally_pub)
        )
        vote_0 = create_vote_transaction['transaction']['outputs'][1]

        # add votes
        transaction = {}
        input_obj = vote_0
        for i in range(0, num_voters):
            transaction = vote.add_vote(
                (input_obj,),
                None,
                None,
                dumps(values[i]), # vote's value (0 or 1)
                pack(keys[i][0]), # voter's priv key
                pack(keys[i][1])  # voter's pub key
            )
            input_obj = transaction['transaction']['outputs'][0]

        # tally
        transaction = vote.tally(
            (input_obj,),
            None,
            None,
            pack(tally_priv),
            pack(tally_pub)
        )
        result = transaction['transaction']['outputs'][0]

        # read result
        transaction = vote.read(
            None,
            (result,),
            None,
        )

        # print result
        print transaction['transaction']['returns']


        ##
        ## submit transaction
        ##
        response = requests.post(
            'http://127.0.0.1:5000/' + vote_contract.contract_name + '/read', json=transaction_to_solution(transaction)
        )
        self.assertTrue(response.json()['success'])

        ##
        ## stop service
        ##
        checker_service_process.terminate()
        checker_service_process.join()
Example #11
def process_multi_threaded(fasta,
                           bam_fname,
                           fastq1,
                           sidecar_fname,
                           fastq2=None,
                           threads=1,
                           max_templates=None,
                           platform='Illumina',
                           sample_name='Seven',
                           cigar_v2=True,
                           do_not_index=False):
    """

  :param bam_fname:
  :param bam_hdr:
  :param fastq1:
  :param sidecar_fname: File containing just the long qnames
  :param fastq2:
  :param threads:
  :param max_templates:
  :param platform:
  :param sample_name:
  :param cigar_v2: If True, write out CIGARs in V2 format
  :param do_not_index: If True, the output BAMs will be collated into one bam, sorted and indexed
                       the N output BAMs created by the individual workers will be deleted at the end.
                       If False, the N output BAMs created by the individual workers will remain. This
                       option allows users to merge, sort and index the BAM fragments with their own tools
  :return:

  Note: The pysam sort invocation expects 1GB/thread to be available
  """
    long_qname_table = load_qname_sidecar(sidecar_fname)

    rg_id = 'rg{}'.format(hash(' '.join(sys.argv)))
    fasta_ann = fasta + '.ann'
    bam_hdr = construct_header(fasta_ann,
                               rg_id=rg_id,
                               sample=sample_name,
                               platform=platform)

    # Start worker processes
    logger.debug('Starting {} processes'.format(threads))
    file_fragments = [
        '{}.{:04}.bam'.format(bam_fname, i) for i in range(threads)
    ]

    in_queue = Queue()
    p_list = [
        Process(target=disciple,
                args=(file_fragments[i], bam_hdr, rg_id, long_qname_table,
                      cigar_v2, in_queue)) for i in range(threads)
    ]
    for p in p_list:
        p.start()

    t0 = time.time()

    # Burn through file
    logger.debug('Starting to read FASTQ file')
    fastq_l = [pysam.FastxFile(fastq1)]
    if fastq2 is not None: fastq_l += [pysam.FastxFile(fastq2)]

    cnt = 0
    for cnt, reads in enumerate(zip(*fastq_l)):
        # qname, [(seq, qual) ... ]
        in_queue.put((reads[0].name, [(r.sequence, r.quality) for r in reads]))
        if max_templates is not None and cnt >= max_templates:
            break
        if cnt % 100000 == 0:
            logger.debug('Read {} templates'.format(cnt))

    # Tell child processes to stop
    logger.debug('Stopping child processes')
    for i in range(threads):
        in_queue.put(__process_stop_code__)

    # Wait for them to finish
    for p in p_list:
        p.join()

    t1 = time.time()
    logger.debug('Processed {} templates in {:0.2f}s ({:0.2f} t/s)'.format(
        cnt, t1 - t0, cnt / (t1 - t0)))

    merge_sorted_fragments(bam_fname,
                           file_fragments,
                           do_not_index=do_not_index)
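The disciple worker and merge_sorted_fragments are not shown here. A minimal sketch of the worker side, assuming it drains the queue until the stop code arrives and appends each converted template to its own BAM fragment (the conversion itself is omitted):

def disciple(bam_fname, bam_hdr, rg_id, long_qname_table, cigar_v2, in_queue):
    # Each worker owns one BAM fragment and consumes templates until the stop code arrives.
    out_bam = pysam.AlignmentFile(bam_fname, 'wb', header=bam_hdr)
    for qname, read_data in iter(in_queue.get, __process_stop_code__):
        pass  # convert (qname, [(seq, qual), ...]) into alignments and write them to out_bam (omitted)
    out_bam.close()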
    def test_add_vote(self):
        ##
        ## run service
        ##
        checker_service_process = Process(target=vote_contract.run_checker_service)
        checker_service_process.start()
        time.sleep(0.1)

        ##
        ## create transaction
        ##
        # create keys
        params = setup()
        (tally_priv, tally_pub)   = key_gen(params)
        (voter1_priv, voter1_pub) = key_gen(params)
        (_, voter2_pub)           = key_gen(params)
        (_, voter3_pub)           = key_gen(params)

        # set up options, participants, and the tally's key
        options      = ['alice', 'bob']
        participants = [pack(voter1_pub), pack(voter2_pub), pack(voter3_pub)]

        # get init token
        init_transaction = vote.init()
        token = init_transaction['transaction']['outputs'][0]

        # initialise vote (all votes are zero)
        create_vote_transaction = vote.create_vote(
            (token,),
            None,
            None,
            dumps(options),
            dumps(participants),
            pack(tally_priv),
            pack(tally_pub)
        )
        old_vote = create_vote_transaction['transaction']['outputs'][1]

        # add a vote
        transaction = vote.add_vote(
            (old_vote,),
            None,
            None,
            dumps([1, 0]),
            pack(voter1_priv),
            pack(voter1_pub)
        )
        print transaction

        ##
        ## submit transaction
        ##
        response = requests.post(
            'http://127.0.0.1:5000/' + vote_contract.contract_name + '/add_vote', json=transaction_to_solution(transaction)
        )
        self.assertTrue(response.json()['success'])

        ##
        ## stop service
        ##
        checker_service_process.terminate()
        checker_service_process.join()
def main(_):
    parser = argparse.ArgumentParser(description='TransE.')
    parser.add_argument('--lr', dest='lr', type=float, help="Learning rate", default=0.005)
    parser.add_argument('--L1_flag', dest='L1_flag', type=int, help="norm method", default=0)
    parser.add_argument('--margin', dest='margin', type=int, help="margin", default=1)
    parser.add_argument('--data', dest='data_dir', type=str, help="Data folder", default='../../data/baike/')
    parser.add_argument("--dim", dest='dim', type=int, help="Embedding dimension", default=150)
    parser.add_argument("--worker", dest='n_worker', type=int, help="Evaluation worker", default=3)
    parser.add_argument("--load_model", dest='load_model', type=str, help="Model file:xxx.meta", default=None)
    parser.add_argument("--max_iter", dest='max_iter', type=int, help="Max iteration", default=500)
    parser.add_argument("--train_batch", dest="train_batch", type=int, help="Training batch size", default=10240)
    parser.add_argument("--eval_batch", dest="eval_batch", type=int, help="Evaluation batch size", default=40960)
    parser.add_argument("--optimizer", dest='optimizer', type=str, help="Optimizer", default='gradient')
    parser.add_argument("--generator", dest='n_generator', type=int, help="Data generator", default=10)
    parser.add_argument("--save_dir", dest='save_dir', type=str, help="Model path", default='./log/')
    parser.add_argument("--save_per", dest='save_per', type=int, help="Save per x iteration", default=50)
    parser.add_argument("--eval_per", dest='eval_per', type=int, help="Evaluate every x iteration", default=5)
    parser.add_argument("--summary_dir", dest='summary_dir', type=str, help="summary directory",default='./TransE_summary/')
    parser.add_argument("--keep", dest='drop_out', type=float, help="Keep prob (1.0 keep all, 0. drop all)",default=0.5)
    parser.add_argument("--pad", dest='candidate_dim', type=int, help="dimension of the candidate",default=55)
    parser.add_argument("--prefix", dest='prefix', type=str, help="model_prefix", default='neg 1 valid')
    args=parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"]="5"

    model=TransE(data_dir=args.data_dir,train_batch=args.train_batch,eval_batch=args.eval_batch,L1_flag=args.L1_flag,margin=args.margin, dim = args.dim)
    pos_triple,neg_triple,train_loss,train_op = train_ops(model,learning_rate=args.lr,optimizer_str=args.optimizer)
    test_input,test_head,test_tail,test_relation=test_ops(model)
    normalize_entity_op,normalize_relation_op=normalize_ops(model)
    candidate_test_input,candidate_input,candidate_tail_ids, candidate_hrt_res = pre_in_candidates_ops(model)

    init=tf.global_variables_initializer()
    config = tf.ConfigProto() 
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    saver = tf.train.Saver()

    # run tensorflow
    with tf.Session(config=config) as sess:
            sess.run(init)
            iter_offset = 0
            total_inst=model.n_train
            raw_training_data_queue = Queue()
            training_data_queue = Queue()
            evaluation_queue=JoinableQueue()
            result_queue=Queue()
            data_generators = list()
            for i in range(args.n_generator):
                data_generators.append(Process(target=data_generator_func,args=(
                                raw_training_data_queue,training_data_queue,model.right_num,model.left_num,model.train_tr_h,model.train_hr_t,model.train_ht_r,
                                model.n_entity,model.n_relation)))
                data_generators[-1].start()
            for i in range(args.n_worker):
                worker=Process(target=worker_func,args=(evaluation_queue,result_queue,
                               model.tr_h,model.hr_t,model.ht_r))
                worker.start()
                print("work %d start!"% i)
            for n_iter in range(iter_offset,args.max_iter):
                start_time = timeit.default_timer()
                accu_loss = 0.0
                ninst = 0
                nbatches_count = 0
                for dat in model.raw_training_data(batch_size=args.train_batch):
                    raw_training_data_queue.put(dat)
                    nbatches_count += 1
                while nbatches_count > 0:

                    nbatches_count -= 1

                    pos_triple_batch,neg_triple_batch= training_data_queue.get()

                    sess.run([normalize_entity_op,normalize_relation_op])

                    loss, _= sess.run([train_loss,train_op], feed_dict={pos_triple:pos_triple_batch,
                                                                        neg_triple:neg_triple_batch})
                    accu_loss += loss
                    ninst += pos_triple_batch.shape[0]

                    if ninst % (5000) is not None:
                        print(
                        '[%d sec](%d/%d) : %.2f -- loss : %.5f ' % (
                            timeit.default_timer() - start_time, ninst, total_inst, float(ninst) / total_inst,
                            loss / (pos_triple_batch.shape[0])),end='\r')
                print("iter %d avg loss %.5f, time %.3f" % (n_iter, accu_loss / ninst, timeit.default_timer() - start_time))

                if n_iter != 0 and (n_iter % args.save_per == 0 or n_iter == args.max_iter - 1):
                    save_path=saver.save(sess,os.path.join(
                        args.save_dir,"TransE_"+str(args.prefix)+"_"+str(n_iter)+".ckpt"))
                    print("TransE Model saved at %s" % save_path)

                if n_iter!=0 and (n_iter % args.eval_per == 0 or n_iter == args.max_iter - 1):
                    test_start_time=timeit.default_timer()
                    accu_mean_rank_t = list()
                    hit_dis = []
                    na_dis = []
                    evaluation_count = 0
                    candidate_len = [0 for i in  range(56)]
                    hit = [0.0 for i in range(56)]
                    for (testing_data,candidates) in model.candidate_data(batch_size=args.eval_batch):
                        candidate_tail_pred, candidate_tail_res =sess.run([candidate_tail_ids, candidate_hrt_res],{candidate_test_input: testing_data,candidate_input:candidates})
                        temp_candidate_len , temp_hit, temp_hit_dis, temp_na_dis = candidate_evaluation_distance(testing_data,candidates, candidate_tail_pred, candidate_tail_res)
                        candidate_len += temp_candidate_len
                        hit += temp_hit
                        hit_dis += temp_hit_dis
                        na_dis += temp_na_dis
                        evaluation_queue.put((testing_data,candidates,candidate_tail_pred))
                        evaluation_count += 1
                    print(hit_dis)
                    np.save("./summary/hit_dis.npy", hit_dis)
                    np.save("./summary/na_dis.npy", na_dis)
                    candidate_len_list = []
                    acc_list = []

                    for i in range(56):
                        if candidate_len[i] != 0:
                            print("|",i,"|", candidate_len[i],"|", round(candidate_len[i]/408867.0,3), "|", hit[i], "|",round(hit[i]/candidate_len[i],3),"|")
                            candidate_len_list.append(i)
                            acc_list.append(round(hit[i]/candidate_len[i],3))
                    print("hit_dis: ", np.mean(np.asarray(hit_dis)))
                    print("na_dis: ", np.mean(np.asarray(na_dis)))
                    with open("./summary/candidate_len_list.json", "w") as f:
                        json.dump(candidate_len_list, f)
                    with open("./summary/acc_list_neg1_l2_valid.json", "w") as f:
                        json.dump(acc_list, f)

                    for i in range(args.n_worker):
                        evaluation_queue.put(None)
                    print("waiting for worker finishes their work")
                    evaluation_queue.join()
                    print("all worker stopped.")

                    while evaluation_count > 0:
                        evaluation_count -= 1
                        mrt = result_queue.get()
                        accu_mean_rank_t += mrt
                    print('cost time:[%.3f sec]'%(timeit.default_timer()-test_start_time))
                    print("margin: ", args.margin, "dim : ", args.dim, "batch_size", args.train_batch)
                    print(
                        "INITIALIZATION [TAIL PREDICTION] MEAN RANK: %.1f  HIT@10 %.3f HIT@1 %.3f tail not in candidate %.3f" %
                        (np.mean(accu_mean_rank_t), 
                        np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
                        np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 1),
                        np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) == 55)))
            for i in range(args.n_generator):
                data_generators[i].terminate()
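worker_func and data_generator_func are defined elsewhere. A minimal sketch of the evaluation worker protocol assumed by the loop above: consume items until the None sentinel, call task_done() so evaluation_queue.join() can return, and push the per-batch tail ranks onto result_queue (the ranking computation itself is omitted):

def worker_func(evaluation_queue, result_queue, tr_h, hr_t, ht_r):
    while True:
        item = evaluation_queue.get()
        if item is None:
            evaluation_queue.task_done()
            break
        testing_data, candidates, candidate_tail_pred = item
        mean_rank_t = []  # compute a tail rank per test triple here (omitted)
        result_queue.put(mean_rank_t)
        evaluation_queue.task_done()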
Example #14
from multiprocessing import Semaphore, Process
from time import sleep
import os

# Create the semaphore
sem = Semaphore(3)

# At most 3 processes in the system may run this event at the same time
def fun():
    sem.acquire() # consume the semaphore
    print("%d is running the event" % os.getpid())
    sleep(3)
    print("%d finished" % os.getpid())
    sem.release() # replenish the semaphore

jobs = []
for i in range(5):
    p = Process(target = fun)
    jobs.append(p)
    p.start()

for i in jobs:
    i.join()

print(sem.get_value())

Example #15
    # t3.start()
    # t4.start()
    # t5.start()
    # t6.start()
    # t7.start()
    # t8.start()
    # t1.join()
    # t2.join()
    # t3.join()
    # t4.join()
    # t5.join()
    # t6.join()
    # t7.join()
    # t8.join()

    p1 = Process(target=batch1, args=(100000000, 102000000))
    p2 = Process(target=batch1, args=(102000001, 104000001))
    p3 = Process(target=batch1, args=(104000002, 106000002))
    p4 = Process(target=batch1, args=(106000003, 108000003))
    p5 = Process(target=batch1, args=(108000004, 110000004))
    p6 = Process(target=batch1, args=(110000005, 112000005))
    p7 = Process(target=batch1, args=(112000006, 114000006))
    p8 = Process(target=batch1, args=(114000007, 116000007))
    p1.start()
    p2.start()
    p3.start()
    p4.start()
    p5.start()
    p6.start()
    p7.start()
    p8.start()
def p_start(text, speed):
    p = Process(target=loading, args=(text, speed,))
    p.start()
    return p
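loading is not defined in this fragment; a minimal sketch, assuming it just prints an animated indicator until the caller terminates the process:

import itertools
import sys
import time

def loading(text, speed):
    # Hypothetical spinner: cycle through a few characters at the given speed.
    for ch in itertools.cycle('|/-\\'):
        sys.stdout.write('\r{} {}'.format(text, ch))
        sys.stdout.flush()
        time.sleep(speed)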
from multiprocessing import Process, Queue  # the Queue here is the queue module that ships with multiprocessing; it differs from the Queue module, presumably via inheritance
import Queue as Q2


def f(q, n):
    q.put([n, 'hello'])  # note that get takes one item arbitrarily, so this example shares data between processes
    print q.get()
    print q.get()


if __name__ == '__main__':
    #q = Queue()
    q = Q2.Queue()
    q.put('ddd')
    for i in range(5):
        p = Process(target=f, args=(q, i))
        p.start()
    while True:
        print q.get()
#for i in range(5):
#   print i                          # runs under linux
'''
#!/usr/bin/python
#_*_coding:utf-8 _*_
from multiprocessing import Process,Queue    # the Queue here is the queue module that ships with multiprocessing; it differs from the Queue module, presumably via inheritance
#import Queue as Q2

def f(q,n):
    q.put([n,'hello'])              # note that put takes one item arbitrarily, so this example shares data between processes
    print q.get()
    print q.get()
Example #18
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)

	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])

	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads

	print settings['threads'], "mining threads started"

	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])

Example #19
    def start_receive(self, channel, end_word='end'):
        if self._job:
            return
        self._job = Process(target=self._receive_core,
                            args=(channel, end_word))
        self._job.start()
Example #20
def main():

    # Get ip address of the given interface
    ip_address = None
    try:
        ip_address = get_ip_address(sys.argv[1])
    except Exception as e:
        print("Something wrong with arguments", e)
        exit(-1)

    print("Starting the video server on {}".format(ip_address))

    # Define Video Capture object and auxiliary objects
    cap = VideoCapture()
    shape = cap.get_size()
    stream = cap.get_stream_function()

    # Define shared values
    shared_array_base = Array(ctypes.c_uint8, shape[0] * shape[1] * shape[2])
    frame = np.ctypeslib.as_array(shared_array_base.get_obj())
    frame = frame.reshape(shape[0], shape[1], shape[2])
    finished = Value('i', 0)
    write_video_flag = Value('i', 0)
    shared_fps = Value('f', 0)

    # Define processes
    video_process = Process(target=stream, args=(frame, finished, shared_fps))
    motion_detector_process = Process(target=motion_detector,
                                      args=(frame, finished, write_video_flag,
                                            shape))
    writer_process = Process(target=writer,
                             args=(frame, finished, write_video_flag, shape))

    # Launch capture process
    video_process.start()

    # Sleep for some time to allow video capture to start working first
    time.sleep(5)

    # Launch the rest processes
    motion_detector_process.start()
    writer_process.start()

    global server

    def terminate():
        cap.release()
        finished.value = True
        video_process.terminate()
        motion_detector_process.terminate()
        writer_process.terminate()
        server.shutdown()

    try:
        # Start the server on the ip address
        camhandler = get_cam_handler(frame, finished, shared_fps, ip_address)
        server = ThreadedHTTPServer((ip_address, 8080), camhandler)
        print("Server started")
        server.serve_forever()
    except KeyboardInterrupt:
        # Release everything
        terminate()
        exit(0)
    except Exception as e:
        # Something wrong happened
        print("Exception", e)
        terminate()
        exit(-1)
Example #21
from multiprocessing import Process, Manager


def f(d):
    d[1] += '1'
    d['2'] += 2


if __name__ == '__main__':
    manager = Manager()
    d = manager.dict()
    d[1] = '1'
    d['2'] = 2

    p1 = Process(target=f, args=(d,))
    p2 = Process(target=f, args=(d,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()

    print d
Example #22
def manage_reconstruction(proc, experiment_dir, rec_id=None):
    """
    This function starts the interruption discovery process and continues with the reconstruction processing.
    
    It reads configuration file defined as <experiment_dir>/conf/config_rec.
    If multiple generations are configured, or separate scans are discovered, it will start concurrent reconstructions.
    It creates image.npy file for each successful reconstruction.
    Parameters
    ----------
    proc : str
        processing library, choices are: cpu, cuda, opencl
    experiment_dir : str
        directory where the experiment files are located
    rec_id : str
        optional, if given, alternate configuration file will be used for reconstruction, (i.e. <rec_id>_config_rec)
    Returns
    -------
    nothing
    """
    if os.path.exists('stopfile'):
        os.remove('stopfile')
    print('starting reconstruction')

    # the rec_id is a postfix added to config_rec configuration file. If defined, use this configuration.
    conf_dir = os.path.join(experiment_dir, 'conf')

    if rec_id is None:
        conf_file = os.path.join(conf_dir, 'config_rec')
    else:
        conf_file = os.path.join(conf_dir, rec_id + '_config_rec')

    # check if file exists
    if not os.path.isfile(conf_file):
        print('no configuration file ' + conf_file + ' found')
        return

    # verify the configuration file
    if not ver.ver_config_rec(conf_file):
        # if not verified, the ver will print message
        return

    try:
        config_map = ut.read_config(conf_file)
        if config_map is None:
            print("can't read configuration file " + conf_file)
            return
    except Exception as e:
        print('Cannot parse configuration file ' + conf_file +
              ' , check for matching parenthesis and quotations')
        print(str(e))
        return

    # exp_dirs_data list hold pairs of data and directory, where the directory is the root of data/data.tif file, and
    # data is the data.tif file in this directory.
    exp_dirs_data = []
    # experiment may be multi-scan in which case reconstruction will run for each scan
    for dir in os.listdir(experiment_dir):
        if dir.startswith('scan'):
            datafile = os.path.join(experiment_dir, dir, 'data', 'data.tif')
            if os.path.isfile(datafile):
                exp_dirs_data.append(
                    (datafile, os.path.join(experiment_dir, dir)))
    # if there are no scan directories, assume it is combined scans experiment
    if len(exp_dirs_data) == 0:
        # in typical scenario data_dir is not configured, and it is defaulted to <experiment_dir>/data
        # the data_dir is ignored in multi-scan scenario
        try:
            data_dir = config_map.data_dir
        except AttributeError:
            data_dir = os.path.join(experiment_dir, 'data')
        if os.path.isfile(os.path.join(data_dir, 'data.tif')):
            exp_dirs_data.append((os.path.join(data_dir,
                                               'data.tif'), experiment_dir))
        elif os.path.isfile(os.path.join(data_dir, 'data.npy')):
            exp_dirs_data.append((os.path.join(data_dir,
                                               'data.npy'), experiment_dir))
    no_runs = len(exp_dirs_data)
    if no_runs == 0:
        print('did not find data.tif nor data.npy file(s). ')
        return
    try:
        generations = config_map.generations
    except:
        generations = 0
    try:
        reconstructions = config_map.reconstructions
    except:
        reconstructions = 1
    device_use = []
    if proc == 'cpu':
        cpu_use = [-1] * reconstructions
        if no_runs > 1:
            for _ in range(no_runs):
                device_use.append(cpu_use)
        else:
            device_use = cpu_use
    else:
        try:
            devices = config_map.device
        except:
            devices = [-1]

        if no_runs * reconstructions > 1:
            if exp_dirs_data[0][0].endswith('tif'):
                data_shape = ut.read_tif(exp_dirs_data[0][0]).shape
            elif exp_dirs_data[0][0].endswith('npy'):
                data_shape = np.load(exp_dirs_data[0][0]).shape
            device_use = get_gpu_use(devices, no_runs, reconstructions,
                                     data_shape)
        else:
            device_use = devices

    # start the interrupt process
    interrupt_process = Process(target=interrupt_thread, args=())
    interrupt_process.start()

    if no_runs == 1:
        if len(device_use) == 0:
            device_use = [-1]
        dir_data = exp_dirs_data[0]
        datafile = dir_data[0]
        dir = dir_data[1]
        if generations > 1:
            gen_rec.reconstruction(proc, conf_file, datafile, dir, device_use)
        elif reconstructions > 1:
            mult_rec.reconstruction(proc, conf_file, datafile, dir, device_use)
        else:
            rec.reconstruction(proc, conf_file, datafile, dir, device_use)
    else:
        if len(device_use) == 0:
            device_use = [[-1]]
        else:
            # check if it is worth using the last chunk
            if proc != 'cpu' and len(device_use[0]) > len(device_use[-1]) * 2:
                device_use = device_use[0:-1]
        if generations > 1:
            r = 'g'
        elif reconstructions > 1:
            r = 'm'
        else:
            r = 's'
        q = Queue()
        for gpus in device_use:
            q.put((None, gpus))
        # index keeps track of the multiple directories
        index = 0
        processes = {}
        pr = []
        while index < no_runs:
            pid, gpus = q.get()
            if pid is not None:
                os.kill(pid, signal.SIGKILL)
                del processes[pid]
            datafile = exp_dirs_data[index][0]
            dir = exp_dirs_data[index][1]
            p = Process(target=rec_process,
                        args=(proc, conf_file, datafile, dir, gpus, r, q))
            p.start()
            pr.append(p)
            processes[p.pid] = index
            index += 1

        for p in pr:
            p.join()

        # close the queue
        q.close()

    interrupt_process.terminate()
    print('finished reconstruction')
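# The multi-run branch above hands out GPU chunks through a Queue: the dispatcher
# seeds one token per chunk, blocks on q.get() until a finished run returns its
# token (plus its PID, which is then killed), and only then launches the next run.
# A simplified, self-contained sketch of that token-queue pattern follows; `work`
# is a hypothetical payload and the kill-by-PID bookkeeping is omitted.
from multiprocessing import Process, Queue


def work(job_id, gpus, q):
    # ... run one reconstruction on `gpus` here ...
    q.put(gpus)                      # hand the device token back when done


def dispatch(n_jobs, device_chunks):
    q = Queue()
    for gpus in device_chunks:
        q.put(gpus)                  # seed one token per device chunk
    procs = []
    for job_id in range(n_jobs):
        gpus = q.get()               # block until a device chunk is free
        p = Process(target=work, args=(job_id, gpus, q))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    q.close()
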
def main(model,
         saveto,
         k=5,
         normalize=False,
         zero_pad=False,
         n_process=5,
         datasets='dev,test',
         sampling=False,
         pkl_name=None):
    # load model model_options
    if pkl_name is None:
        pkl_name = model
    with open('%s.pkl' % pkl_name, 'rb') as f:
        options = pkl.load(f)

    # fetch data, skip ones we aren't using to save time
    load_data, prepare_data = get_dataset(options['dataset'])
    _, valid, test, worddict = load_data(
        load_train=False,
        load_dev=True if 'dev' in datasets else False,
        load_test=True if 'test' in datasets else False)

    # <eos> means end of sequence (aka periods), UNK means unknown
    word_idict = dict()
    for kk, vv in worddict.iteritems():
        word_idict[vv] = kk
    word_idict[0] = '<eos>'
    word_idict[1] = 'UNK'

    # create processes
    queue = Queue()
    rqueue = Queue()
    processes = [None] * n_process
    for midx in xrange(n_process):
        processes[midx] = Process(target=gen_model,
                                  args=(queue, rqueue, midx, model, options, k,
                                        normalize, word_idict, sampling))
        processes[midx].start()

    # index -> words
    def _seqs2words(caps):
        capsw = []
        for cc in caps:
            ww = []
            for w in cc:
                if w == 0:
                    break
                ww.append(word_idict[w])
            capsw.append(' '.join(ww))
        return capsw

    # unsparsify, reshape, and queue
    def _send_jobs(contexts):
        for idx, ctx in enumerate(contexts):
            cc = ctx.todense().reshape([14 * 14, 512])
            if zero_pad:
                cc0 = numpy.zeros(
                    (cc.shape[0] + 1, cc.shape[1])).astype('float32')
                cc0[:-1, :] = cc
            else:
                cc0 = cc
            queue.put((idx, cc0))

    # retrieve caption from process
    def _retrieve_jobs(n_samples):
        caps = [None] * n_samples
        for idx in xrange(n_samples):
            resp = rqueue.get()
            caps[resp[0]] = resp[1]
            if numpy.mod(idx, 10) == 0:
                print 'Sample ', (idx + 1), '/', n_samples, ' Done'
        return caps

    ds = datasets.strip().split(',')

    # send all the features for the various datasets
    for dd in ds:
        if dd == 'dev':
            print 'Development Set...',
            _send_jobs(valid[1])
            caps = _seqs2words(_retrieve_jobs(len(valid[1])))
            with open(saveto + '.dev.txt', 'w') as f:
                print >> f, '\n'.join(caps)
            print 'Done'
        if dd == 'test':
            print 'Test Set...',
            _send_jobs(test[1])
            caps = _seqs2words(_retrieve_jobs(len(test[1])))
            with open(saveto + '.test.txt', 'w') as f:
                print >> f, '\n'.join(caps)
            print 'Done'
    # end processes
    for midx in xrange(n_process):
        queue.put(None)
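# The example above is Python 2 (print statements, xrange, iteritems). The core
# pattern, a job queue feeding several sampler processes and a result queue that
# returns (index, caption) pairs so order can be restored, looks roughly like
# this in Python 3; `decode` is only a placeholder for the original gen_model.
from multiprocessing import Process, Queue


def decode(ctx):
    # placeholder for the beam-search sampler (gen_model in the original)
    return str(ctx)


def caption_worker(jobs, results):
    while True:
        item = jobs.get()
        if item is None:             # sentinel: no more work
            break
        idx, ctx = item
        results.put((idx, decode(ctx)))


def caption_all(contexts, n_process=5):
    jobs, results = Queue(), Queue()
    procs = [Process(target=caption_worker, args=(jobs, results))
             for _ in range(n_process)]
    for p in procs:
        p.start()
    for idx, ctx in enumerate(contexts):
        jobs.put((idx, ctx))
    caps = [None] * len(contexts)
    for _ in range(len(contexts)):
        idx, cap = results.get()
        caps[idx] = cap              # results arrive out of order; the index restores it
    for _ in procs:
        jobs.put(None)               # one sentinel per worker
    for p in procs:
        p.join()
    return caps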
Beispiel #24
import time
import multiprocessing
from multiprocessing import Process


def task1(msg):
    print('task1: hello, %s' % msg)
    time.sleep(1)


def task2(msg):
    print('task2: hello, %s' % msg)
    time.sleep(1)


def task3(msg):
    print( 'task3: hello, %s' % msg)
    time.sleep(1)


if __name__ == '__main__':
    p1 = Process(target=task1, args=('one',))
    p2 = Process(target=task2, args=('two',))
    p3 = Process(target=task3, args=('three',))

    start = time.time()

    p1.start()
    p2.start()
    p3.start()

    print("The number of CPU is:" + str(multiprocessing.cpu_count()))
    for p in multiprocessing.active_children():
        print("child p.name: " + p.name + "\tp.id: " + str(p.pid))

    p1.join()
    p2.join()
    p3.join()
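# Starting and joining each process by hand, as above, does not scale well; the
# same run is usually written with a loop. A sketch reusing task1..task3 from
# this snippet (not part of the original code):
def run_all():
    procs = [Process(target=t, args=(m,))
             for t, m in ((task1, 'one'), (task2, 'two'), (task3, 'three'))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()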
Beispiel #25
def run(host='127.0.0.1', port=5556):
    Process(target=_serve, args=(host, port)).start()
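# `_serve` is not shown in this snippet; a minimal, hypothetical stand-in (a
# one-shot echo server, not the project's real implementation) is enough to make
# run() above spawnable.
import socket


def _serve(host, port):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((host, port))
        s.listen()
        conn, _ = s.accept()
        with conn:
            conn.sendall(conn.recv(1024))   # echo back whatever arrived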
Beispiel #26
def build_hash_breaker_process(word_generator, batch_size, start):
    return Process(target=find_hash_original,
                   args=(hashlib.md5,
                         {next(word_generator)
                          for _ in range(batch_size)
                          }, read_shadow('shadow'), start))
Beispiel #27
    def run(self):
        print('IP processing running')
        valid_process = Process(target=Schedule.valid_proxy)
        check_process = Process(target=Schedule.check_pool)
        valid_process.start()
        check_process.start()
Beispiel #28
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print(get_local_ip())
    s.bind((str(get_local_ip()), 5555))
    s.listen()

    conn, addr = s.accept()
    print("Connection address:  " + str(addr))

    while True:
        data = conn.recv(20)
        if not data:
            break
        server_ip = str(data.decode())
        print("Server IP is: " + server_ip)

    P1 = Process(target=run_OSC, args=[motor_values])
    P1.start()

    fig, ax = plt.subplots()
    p1 = plt.bar(0, initial_motor_state, color='b')
    ax.set_ylim(0, 100)

    anim = animation.FuncAnimation(fig,
                                   animate,
                                   interval=0,
                                   frames=None,
                                   fargs=[p1],
                                   repeat=False,
                                   blit=True)
    plt.show()
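# In the script above motor_values is handed to the OSC process; for the child's
# writes to be visible to the plotting loop in the parent, a shared-memory object
# such as multiprocessing.Array is the usual choice. A minimal sketch of just
# that part (run_OSC and the matplotlib animation are omitted):
from multiprocessing import Array, Process
import time


def osc_writer(motor_values):
    for v in range(5):
        motor_values[0] = float(v)   # visible to the parent: Array lives in shared memory
        time.sleep(0.1)


if __name__ == '__main__':
    motor_values = Array('d', [0.0])          # one double, initialized to 0
    writer = Process(target=osc_writer, args=(motor_values,))
    writer.start()
    writer.join()
    print(motor_values[0])                    # 4.0 after the writer finishes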
Beispiel #29
                        ]
                        print(row)
                    else:
                        row = [
                            smart2, smart3, smart4, smart5, smart9, smart193,
                            smart194, smart195, smart196, smart197, smart198,
                            0, 100
                        ]
                    writer.writerow(row)


if __name__ == '__main__':
    file_list = []
    os_csv_path("/ssd/diskcsv", file_list)
    fileListALL = create_fileprocess(file_list, processes_num)
    processes = list()
    #write_csv(fileListALL[0], 'f**k.csv')
    for i in range(processes_num):

        #p = Process(target=csv_ana, args=(fileListALL[i], f_l, sige_all[i],i,))
        input_file = "12thread" + str(i) + ".csv"
        p = Process(target=write_csv, args=(
            fileListALL[i],
            input_file,
        ))
        p.start()
        processes.append(p)

    for p in processes:
        p.join()

    MyManager.register('Ppo', PPO)
    manager = MyManager()
    manager.start()
    Ppo = manager.Ppo(action_dim, observation_dim, is_train=True)

    # GLOBAL_PPO = PPO(action_dim, observation_dim, is_train=True)  # a global PPO
    print('<TRAIN_LOG> ', 'GLOBAL_PPO get', ' time:', datetime.now())

    try:
        process = []
        # create a process for each worker
        workers = [Worker(wid=i, ppo=Ppo) for i in range(N_WORKER)]
        for i, worker in enumerate(workers):
            p = Process(target=worker.work, )  # create the worker process
            p.start()  # start the process
            print('<TRAIN_LOG> ', ' worker process: ', i, 'pid:', p.pid, ' time:', datetime.now())
            process.append(p)  # keep the process in the list for bookkeeping

        # append the global PPO update process to the end of the process list
        p = Process(target=Ppo.update, )
        p.start()
        print('update process', 'pid', p.pid, ' time:', datetime.now())
        p.join()
        process.append(p)

        for p in process:
            p.join()
    except Exception as e:
        print("exception occurred:", e)