def test_parallel_runner_01():
    """A small arithmetic workflow must evaluate to 42 on 4 worker threads."""
    a = add(1, 1)          # -> 2
    b = sub(3, a)          # -> 1
    # Six products of the form (i + b) * a, i in 0..5.
    products = [mul(add(i, b), a) for i in range(6)]
    total = sum(gather(*products))
    assert run_parallel(total, 4) == 42
def test_parallel_runner_02():
    """100 jobs of ``delayed(1, 0.01)`` on 4 threads must finish well under 0.4 s."""
    jobs = [delayed(1, 0.01) for _ in range(100)]
    total = sum(gather(*jobs))

    t0 = time.time()
    assert run_parallel(total, 4) == 100
    elapsed = time.time() - t0

    assert elapsed < 0.4  # liberal upper limit for running time
def __call__( self, logger: Optional[Logger] = None, n_processes: int = 1 ) -> Union[Tuple[None, None], Tuple[List[MultiMolecule], List[Any]]]: r"""Run all jobs and return a sequence of list of MultiMolecules. Parameters ---------- logger : :class:`logging.Logger`, optional A logger for reporting job statuses. Returns ------- :class:`list` [:class:`FOX.MultiMolecule`], optional Returns ``None`` if one of the jobs crashed; a list of MultiMolecule is returned otherwise. """ # Construct the logger if logger is None: logger = cast(Logger, DummyLogger()) # Check if a hook has been specified if self.hook is not None: results = next(self.hook) return self._extract_mol(results, logger) jobs_iter = iter(self.items()) name, jobs = next(jobs_iter) promised_jobs: List[PromisedObject] = [ self.assemble_job(j, name=name) for j in jobs ] for name, jobs in jobs_iter: promised_jobs = [ self.assemble_job(j, p_j, name=name) for j, p_j in zip(jobs, promised_jobs) ] results = run_parallel(gather(*promised_jobs), n_threads=n_processes) return self._extract_mol(results, logger)
def static_sum(values, limit_n=1000):
    """Example of static sum routine."""
    if len(values) < limit_n:
        return sum(values)
    # Split in half and add the two scheduled partial sums.
    mid = len(values) // 2
    return add(static_sum(values[:mid], limit_n),
               static_sum(values[mid:], limit_n))


@schedule
def dynamic_sum(values, limit_n=1000, acc=0, depth=4):
    """Example of dynamic sum."""
    if len(values) < limit_n:
        return acc + sum(values)
    if depth > 0:
        # Still allowed to branch: recurse on both halves.
        mid = len(values) // 2
        return add(dynamic_sum(values[:mid], limit_n, acc, depth=depth - 1),
                   dynamic_sum(values[mid:], limit_n, 0, depth=depth - 1))
    # Depth exhausted: consume one chunk eagerly and tail-recurse.
    return dynamic_sum(
        values[limit_n:], limit_n, acc + sum(values[:limit_n]), depth)


result = run_parallel(dynamic_sum(range(1000000000), 1000000), 4)
print(result)
print(
    "Next, as many threads as there are logical cores, taken from multiprocessing.cpu_count()."
)
start_mt = time.time()
my_q = queue.Queue()
for value in range_of_values:
    my_q.put(value)

threads = [threading.Thread(target=worker, args=(my_q,))
           for _ in range(ncpus)]
for t in threads:
    t.start()
for t in threads:
    t.join()
end_mt = time.time()

print()
print("Now Noodles with as many threads as there are logical cores.")
start_noodles = time.time()
workflow = gather(*(schedule(sumPrimes_noodles)(x) for x in range_of_values))
result = run_parallel(workflow, n_threads=ncpus)
for item in result:
    print(item)
end_noodles = time.time()

print()
print("A single thread takes {0:.2f} seconds".format(end_st - start_st))
print("Multithreading takes {0:.2f} seconds".format(end_mt - start_mt))
print("Noodles takes {0:.2f} seconds".format(end_noodles - start_noodles))
@schedule
def mul(a, b):
    """Scheduled multiplication of two values."""
    return a * b


@schedule
def my_sum(a, buildin_sum=sum):
    """Scheduled wrapper around the built-in :func:`sum`."""
    return buildin_sum(a)


# a bit more complicated example
# -------------------------------
r1 = add(1, 1)
r2 = sub(3, r1)


def foo(a, b, c):
    """Build the scheduled expression ``(a + b) * c``."""
    return mul(add(a, b), c)


multiples = [foo(i, r2, r1) for i in range(6)]
r5 = my_sum(gather(*multiples))

draw_workflow("graph-example2.svg", r5)
answer = run_parallel(r5, 4)
print("The answer is: {0}".format(answer))
def test_higher_order():
    """Sum of squares over num_range(0, 10) must evaluate to 285."""
    workflow = sum(map(sqr, num_range(0, 10)))
    assert run_parallel(workflow, 4) == 285
def test_recursion_parallel():
    """Single-threaded evaluation of factorial(50.0); floor(ln 50!) == 148."""
    promise = factorial(50.0)
    result = run_parallel(promise, n_threads=1)
    print(result)
    assert floor(log(result)) == 148