def test_ppft_callable():
    """Test parallel python (ppft) job submission with a callable object.

    Submits one ``mh.__call__`` job per entry of the module-level ``inputs``,
    merges the returned histograms and returns the combined result.
    Returns None when ppft is unavailable or the test is disabled.
    """
    logger = getLogger("ostap.test_ppft_callable")
    logger.info('Test job submission with %s' % ppft)
    if not ppft:
        logger.error("ppft is not available")
        return
    # NOTE(review): the test is unconditionally disabled here; everything
    # below the next 'return' is dead code, kept for when it is re-enabled.
    logger.warning("test is disabled for UNKNOWN REASON")
    return
    job_server = ppft.Server()
    # one job per input chunk: (index, number-of-entries)
    jobs = [(i, job_server.submit(mh.__call__, (i, n)))
            for (i, n) in enumerate(inputs)]
    result = None
    # renamed loop variable: 'input' shadowed the builtin
    for entry, job in progress_bar(jobs):
        histo = job()
        if not result:
            result = histo
        else:
            result.Add(histo)
        del histo
    logger.info("Histogram is %s" % result.dump(80, 20))
    logger.info("Entries %s/%s" % (result.GetEntries(), sum(inputs)))
    result.Draw()
    time.sleep(2)
    return result
def test_ppft_callable():
    """Test parallel python (ppft) job submission with a callable object.

    Submits one ``mh.__call__`` job per entry of the module-level ``inputs``,
    merges the returned histograms, draws the combined histogram on a canvas
    and returns it.  Returns None when ppft is unavailable or the known
    DILL/ROOT/PY3 issue disables the test.
    """
    import sys  # local import: only needed for the version string below
    logger = getLogger("ostap.test_ppft_callable")
    logger.info('Test job submission with %s' % ppft)
    if not ppft:
        logger.error("ppft is not available")
        return
    if DILL_PY3_issue:
        # BUGFIX: the original message contained a dangling '%s' with no
        # argument and logged a literal '%s'; supply the version explicitly.
        logger.warning("test is disabled for Python %s (DILL/ROOT/PY3 issue)"
                       % sys.version.split()[0])
        return
    job_server = ppft.Server()
    # one job per input chunk: (index, number-of-entries)
    jobs = [(i, job_server.submit(mh.__call__, (i, n)))
            for (i, n) in enumerate(inputs)]
    result = None
    # renamed loop variable: 'input' shadowed the builtin
    for entry, job in progress_bar(jobs):
        histo = job()
        if not result:
            result = histo
        else:
            result.Add(histo)
        del histo
    logger.info("Histogram is %s" % result.dump(80, 20))
    logger.info("Entries %s/%s" % (result.GetEntries(), sum(inputs)))
    with wait(1), use_canvas('test_ppft_callable'):
        result.draw()
    return result
def parallelS(FCMs):
    """Run ``parallelizeS`` for every FCM in *FCMs* in parallel via pp.

    Parameters
    ----------
    FCMs : mapping of FCM key -> FCM data; one pp job is submitted per entry.

    Prints each simulation result and the elapsed time, then returns the
    value of ``job_server.print_stats()`` (normally None).
    """
    print("Number of FCMs to parallelize: ", len(FCMs))
    ppservers = ()  # no remote servers: local workers only
    job_server = pp.Server(ppservers=ppservers)  # creates the job server
    print("Starting pp with", job_server.get_ncpus(), "workers")  # number of local processors
    start_time = time.time()  # begin time
    # One job per FCM; the listed modules must be importable on the workers.
    # (Removed: large sections of commented-out dispy/depfuncs experiments.)
    jobs = [(fcm, job_server.submit(func=parallelizeS,
                                    args=(fcm, FCMs[fcm]),
                                    modules=("sys", "FCM", "Simulation")))
            for fcm in FCMs]
    # job() blocks until that particular job's result is available
    for counter, (fcm, job) in enumerate(jobs, start=1):
        print("Simulation on FCM #", counter, "is\n\n", job())  # the output of simulation
    print("\nTime elapsed: ", time.time() - start_time, "s\n\n")  # end time
    return job_server.print_stats()
def parallelT(job_count):
    """Submit *job_count* ``parallelizeT`` jobs to a local pp server.

    Prints each job's transfer function and the total elapsed time, then
    returns the value of ``server.print_stats()``.
    """
    server = pp.Server(ppservers=())  # local workers only, no remote servers
    print("Starting pp with", server.get_ncpus(), "workers")  # number of local processors
    t0 = time.time()
    # Collect (index, job) pairs; each worker needs the 'math' module.
    submitted = []
    for idx in range(job_count):
        submitted.append((idx, server.submit(func=parallelizeT,
                                             args=(),
                                             modules=("math", ))))
    # job() blocks until that job's result is ready
    for idx, task in submitted:
        print("Transfer function #", idx, "is", task())  # the lambda transfer function
    print("\nTime elapsed: ", time.time() - t0, "s\n\n")
    return server.print_stats()
def test_ppft_function():
    """Test parallel python (ppft) job submission with a plain function.

    Submits one ``make_histo`` job per entry of the module-level ``inputs``,
    merges the returned histograms, prints server statistics, draws the
    result and returns it.  Returns None when ppft is unavailable or the
    known dill/ROOT issue disables the test.
    """
    import sys  # local import: only needed for the version string below
    logger = getLogger("ostap.test_ppft_function")
    logger.info('Test job submission with %s' % ppft)
    if not ppft:
        # BUGFIX: the message said "ppdf" while the guard checks ppft
        logger.error("ppft is not available")
        return
    from ostap.core.known_issues import DILL_ROOT_issue
    if DILL_ROOT_issue:
        # BUGFIX: the original message contained a dangling '%s' with no
        # argument and logged a literal '%s'; supply the version explicitly.
        logger.warning("test is disabled for Python %s (dill/ROOT issue)"
                       % sys.version.split()[0])
        return
    job_server = ppft.Server()
    # one job per input chunk: (index, number-of-entries)
    jobs = [(i, job_server.submit(make_histo, (i, n)))
            for (i, n) in enumerate(inputs)]
    result = None
    # NOTE(review): 'uimap(jobs)' looks suspicious — jobs is already an
    # iterable of (index, job) pairs (compare test_ppft_callable, which
    # iterates progress_bar(jobs) directly); confirm uimap is intended here.
    for entry, job in progress_bar(uimap(jobs), max_value=len(jobs)):
        histo = job()
        if not result:
            result = histo
        else:
            result.Add(histo)
        del histo
    logger.info("Histogram is %s" % result.dump(80, 20))
    logger.info("Entries %s/%s" % (result.GetEntries(), sum(inputs)))
    job_server.print_stats()
    result.Draw()
    time.sleep(2)
    return result
    # NOTE(review): this 'return' is the tail of a function whose 'def' line
    # lies outside this chunk — presumably sum_primes(n): sum of all primes
    # in [2, n), using the isprime helper.
    return sum([x for x in range(2, n) if isprime(x)])

########################################################################
# Script entry: optional ssh-tunnel port numbers may be given on the
# command line; each becomes a "localhost:<port>" ppft server entry.
print("""Usage: python sum_primesX.py [tunnelport] [tunnelport] - the port number(s) of the local ssh tunnel connection, if omitted no tunneling will be used.""")

ppservers = []
for i in range(1,len(sys.argv)):
    tunnelport = int(sys.argv[i])
    ppservers.append("localhost:%s" % tunnelport)
ppservers = tuple(ppservers)

# Creates jobserver with automatically detected number of workers
job_server = ppft.Server(ppservers=ppservers)

# Allow running without local workers
# (LOCAL_WORKERS is defined elsewhere in the file; 'autodetect' keeps
#  ppft's automatic CPU-count behaviour)
if LOCAL_WORKERS != 'autodetect':
    job_server.set_ncpus(LOCAL_WORKERS)

#print("Known servers: [('local',)] %s %s" % (job_server.ppservers,job_server.auto_ppservers))
print("Known servers: [('local',)] %s" % (job_server.ppservers))
print("Starting ppft with %s local workers" % job_server.get_ncpus())

# Submit a job of calulating sum_primes(100) for execution.
# sum_primes - the function
# (100,) - tuple with arguments for sum_primes
# (isprime,) - tuple with functions on which function sum_primes depends
# ("math",) - tuple with module names which must be imported before
# sum_primes execution