def thread_test(fn, args):
    from threading import Thread as Worker
    from time import time

    start = time()
    procs = [Worker(target=fn, args=args) for _ in range(workers)]
    any(p.start() for p in procs)  # start the workers
    any(p.join() for p in procs)   # wait for the workers to finish
    return start
def multiprocessing_test(fn, args):
    from multiprocessing import Process as Worker
    from time import time

    start = time()
    procs = [Worker(target=fn, args=args) for _ in range(workers)]
    any(p.start() for p in procs)  # start the workers
    any(p.join() for p in procs)   # wait for the workers to finish
    return start
def thread_pool_exe_test(fn, args):
    from concurrent.futures import ThreadPoolExecutor as Worker
    from concurrent.futures import as_completed
    from time import time

    start = time()
    with Worker(max_workers=workers, thread_name_prefix="Perform") as exe:
        futures = {exe.submit(fn, *args): job for job in range(workers)}
        for future in as_completed(futures):
            future.result()
    return start
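# A minimal driver sketch for the three helpers above (not part of the original
# snippets). It assumes `workers` is defined at module level, that the helpers
# live in the same module, and uses a hypothetical `burn_cpu` task; the caller
# computes the elapsed time from the returned start timestamp.
from time import time, sleep

workers = 4  # assumed worker count referenced by the helpers above

def burn_cpu(duration):
    # hypothetical stand-in workload; any callable accepted by Worker(target=...) works
    sleep(duration)

if __name__ == "__main__":
    for test in (thread_test, multiprocessing_test, thread_pool_exe_test):
        start = test(burn_cpu, (0.5,))
        print("%s: %.3f seconds" % (test.__name__, time() - start))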
def run(self):
    """function to run"""
    t_name = threading.currentThread().getName()
    p_name = multiprocessing.current_process().name
    print('[%s|%s] waiting for cancel to happen' % (p_name, t_name))
    new_t = Worker(target=rogue_resource)
    new_t.daemon = True
    #ret = self.cancel.wait(timeout=2)
    new_t.start()
    new_t.join(timeout=2)
    if new_t.is_alive():
        print('it is still running ....')
        return False
    else:
        return True
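# The join(timeout=2) / is_alive() pair above is what detects a worker that never
# finishes. A minimal sketch of a blocking rogue_resource (its real body is not
# shown in the snippet; this stand-in simply blocks so the timeout path fires):
import threading

def rogue_resource():
    # hypothetical: wait on an event that is never set, so the daemon thread
    # outlives the 2-second join and is_alive() returns True
    threading.Event().wait()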
if c == "set": registers[chunks[1]] = y elif c == "add": registers[chunks[1]] += y elif c == "mul": registers[chunks[1]] *= y elif c == "mod": registers[chunks[1]] %= y elif c == "jgz" and x > 0: pc += y - 1 pc += 1 if pid == 1: print(send_counter) if __name__ == "__main__": with open("18_input.txt") as f: instructions = [l.strip() for l in f.readlines()] q0 = Queue() q1 = Queue() p1 = Worker(target=program, args=(instructions, q0, q1, 0)) p2 = Worker(target=program, args=(instructions, q1, q0, 1)) p1.start() p2.start() p1.join() p2.join()
def start_worker(Worker, listen_sock):
    worker = Worker(target=server_loop, args=(listen_sock, ))
    worker.daemon = True
    worker.start()
    return worker
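# start_worker() takes the worker class itself, so the same call site can serve
# from a thread or a child process. A minimal sketch, assuming a simple echo-style
# server_loop stands in for the real one (not shown in the snippet):
import socket
from threading import Thread

def server_loop(listen_sock):
    # hypothetical stand-in: greet one client at a time
    while True:
        conn, _addr = listen_sock.accept()
        conn.sendall(b"hello\n")
        conn.close()

listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_sock.bind(("127.0.0.1", 0))  # port 0 lets the OS pick a free port
listen_sock.listen(5)

worker = start_worker(Thread, listen_sock)
# multiprocessing.Process can be passed instead, but the listening socket is only
# inherited cleanly where the "fork" start method is available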
    :param cycles: The number of iterations to perform
    '''
    logger = log_to_stderr()
    logger.setLevel(logging.DEBUG)
    logger.debug("starting worker: %d" % os.getpid())

    try:
        count = 0
        client = ModbusTcpClient(host)
        while count < cycles:
            result = client.read_holding_registers(10, 1).getRegister(0)
            count += 1
    except:
        logger.exception("failed to run test successfully")
    logger.debug("finished worker: %d" % os.getpid())

#---------------------------------------------------------------------------#
# run our test and check results
#---------------------------------------------------------------------------#
# We shard the total number of requests to perform between the number of
# threads that were specified. We then start all the threads and block on
# them to finish. This may need to switch to another mechanism to signal
# finished as the process/thread start up/shut down may skew the test a bit.
#---------------------------------------------------------------------------#
args = (host, int(cycles * 1.0 / workers))
procs = [Worker(target=single_client_test, args=args) for _ in range(workers)]
start = time()
any(p.start() for p in procs)  # start the workers
any(p.join() for p in procs)   # wait for the workers to finish
stop = time()
print("%d requests/second" % ((1.0 * cycles) / (stop - start)))
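# In these performance tests Worker is an alias bound once at import time, so the
# sharded loop (cycles / workers requests per worker, e.g. 1000 cycles over 10
# workers is 100 requests each) runs unchanged under threads or processes.
# A minimal sketch of that switch (the flag name is illustrative, not from the source):
use_processes = False
if use_processes:
    from multiprocessing import Process as Worker
else:
    from threading import Thread as Worker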
def main(args):
    M, R, V, ma = init_model(args)
    input_basename = basename(args.input_filename)
    splitter = do_splitter(literal_eval(args.delimiter))
    unk_index = V[literal_eval(args.unk)]
    v_reader = lambda w: V[w] if w in V else unk_index
    i2w = {v: k for k, v in V.items()}
    initial_basename = basename(args.initial_model)
    args.saved_model = args.save_format.format(initial_basename, input_basename,
                                               len(V), args.dim, args.param)
    evaluated_model = initial_basename
    print_flush("Done\n")

    for variable in sorted(vars(args)):
        print(variable + ":", vars(args)[variable])

    print_flush("Building functions ...")
    cost_f, update_f, getter_f = build_functions(M, ma, args, R)
    print_flush("Done\n")

    if args.random:
        print_flush("Reading \"" + args.input_filename + "\" ...")
        with open(args.input_filename) as input_f:
            train_database = [l.strip() for l in input_f.buffer]
        print_flush(len(train_database), "samples. Done\n")

    print("[sample\tentropy\tsample/sec\tmatrix cutoff\treadout cutoff]")

    global halted
    halted = Value('i', 0)

    eval_epoch = False
    first_training = True
    for i in range(args.epoch + int(args.do_eval)):
        if i == args.epoch:
            eval_epoch = True
            first_training = False
        elif i == 0:
            first_training = True
            eval_epoch = False
        else:
            first_training = False
            eval_epoch = False

        if eval_epoch:
            print("Evaluating: \"" + evaluated_model + "\"")
            f = cost_f
        if first_training:
            print("Training: \"" + args.saved_model + "\"")
            f = update_f

        if not eval_epoch and args.random:
            print_flush("[Shuffle ...", file=sys.stderr)
            shuffle(train_database)
            print("Done]", file=sys.stderr)

        if args.random:
            dataset_reader = iter(train_database)
        else:
            input_f = open(args.input_filename)
            dataset_reader = (l.strip() for l in input_f.buffer)

        input_queue_recv, input_queue_send = Pipe()
        reader = Worker(target=reader_process, name='reader',
                        args=(input_queue_send, dataset_reader, splitter, v_reader, args))
        reader.start()

        print("[Epoch %d]" % (i + 1), file=sys.stderr)
        try:
            worker_process(input_queue_recv, f, getter_f, args, i2w, ma.positions)
        except KeyboardInterrupt:
            halted.value = 1
        reader.join()

        if halted.value > 0:
            print("Halted", file=sys.stderr)
            return 1
        if eval_epoch:
            return 0
        if not args.save_at_end or i == args.epoch - 1:
            save_model(getter_f, args.saved_model, i2w, ma.positions)
            evaluated_model = args.saved_model

    return 0
from __future__ import print_function

from Queue import Queue as Jobs
from threading import Thread as Worker
from random import randint as assign_job
from time import sleep as work  # the job is sleeping!


def gowork(worker_id, jobs):
    job_count = 0
    while True:
        job_id, job_time = jobs.get()
        print('worker {} is working on job {}.'.format(worker_id, job_id))
        work(job_time)
        job_count += 1
        print('worker {} finished job {}.'.format(worker_id, job_id))
        print('worker {} has done {} jobs.'.format(worker_id, job_count))
        jobs.task_done()


if __name__ == '__main__':
    jobs = Jobs()
    for job_id in range(10):
        job = job_id, assign_job(1, 5)
        jobs.put(job)

    for worker_id in range(3):
        worker = Worker(target=gowork, args=(worker_id, jobs))
        worker.daemon = True
        worker.start()

    jobs.join()
    print('all jobs are done!')
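# The example above targets Python 2 (the Queue module and the print_function
# import). Under Python 3 only the import needs to change:
from queue import Queue as Jobs  # Python 3 module name; print() is already built in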
            count += 1
    except:
        logger.exception("failed to run test successfully")
    logger.debug("finished worker: %d" % os.getpid())


# --------------------------------------------------------------------------- #
# run our test and check results
# --------------------------------------------------------------------------- #
# We shard the total number of requests to perform between the number of
# threads that were specified. We then start all the threads and block on
# them to finish. This may need to switch to another mechanism to signal
# finished as the process/thread start up/shut down may skew the test a bit.
# RTU 32 requests/second @9600
# TCP 31430 requests/second
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
    args = (host, int(cycles * 1.0 / workers))
    procs = [Worker(target=single_client_test, args=args) for _ in range(workers)]

    start = time()
    any(p.start() for p in procs)  # start the workers
    any(p.join() for p in procs)   # wait for the workers to finish
    stop = time()

    print("%d requests/second" % ((1.0 * cycles) / (stop - start)))
    print("time taken to complete %s cycles by "
          "%s workers is %s seconds" % (cycles, workers, stop - start))