Example #1
def step_les_models(model_time, work_queue, offset=les_spinup):
    global errorFlag
    les_wall_times = []
    if not any(les_models):
        return les_wall_times
    if les_queue_threads >= len(les_models):  # Step all dales models in parallel
        if async_evolve:  # evolve all dales models with asynchronous Amuse calls
            reqs = []
            pool = AsyncRequestsPool()
            for les in les_models:
                req = les.evolve_model.asynchronous(model_time +
                                                    (offset | units.s),
                                                    exactEnd=True)
                reqs.append(req)
                pool.add_request(req)
            # now while the dales threads are working, sync the netcdf to disk
            spio.sync_root()
            # wait for all threads
            pool.waitall()
            try:
                les_wall_times = [r.result().value_in(units.s) for r in reqs]
                log.info("async step_les_models() done. Elapsed times:" +
                         str(['%5.1f' % t for t in les_wall_times]))
            except Exception as e:
                log.error("Exception caught while gathering results: %s" % e)

        else:  # evolve all dales models using python threads
            threads = []
            for les in les_models:
                t = threading.Thread(target=step_les,
                                     args=(les, model_time, offset),
                                     name=str(les.grid_index))
                # t.setDaemon(True)
                threads.append(t)
                t.start()
            # now while the dales threads are working, sync the netcdf to disk
            spio.sync_root()
            # wait for all threads
            for t in threads:
                # log.info("Waiting to join thread %s..." % t.name)
                t.join()
                # log.info("joined thread %s" % t.name)
    elif les_queue_threads > 1:
        for les in les_models:
            work_queue.put((les, model_time))  # enqueue all dales instances
        # now while the dales threads are working, sync the netcdf to disk
        spio.sync_root()
        work_queue.join()  # wait for all dales work to be completed
        if errorFlag:
            log.info("One thread failed - exiting ...")
            # stop_worker_threads(work_queue)  # signal worker threads to quit -
            # now an atexit function, should not need it here
            finalize()
            sys.exit(1)
    else:  # sequential version
        for les in les_models:
            step_les(les, model_time, offset)
    return les_wall_times
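
The asynchronous branch above reduces to a small, reusable pattern: create an AsyncRequestsPool, submit .asynchronous(...) calls, overlap other work (here, flushing NetCDF output to disk), then waitall() and collect the results. Below is a minimal sketch of that pattern in isolation; the helper name evolve_all_async and the codes argument are hypothetical stand-ins for les_models, and the Dales-specific exactEnd keyword is omitted.

from amuse.rfi.async_request import AsyncRequestsPool
from amuse.units import units


def evolve_all_async(codes, end_time_s):
    """Evolve several AMUSE code instances concurrently; return their results."""
    pool = AsyncRequestsPool()
    requests = []
    for code in codes:
        # issue the remote call without blocking; the worker keeps running in the background
        req = code.evolve_model.asynchronous(end_time_s | units.s)
        requests.append(req)
        pool.add_request(req)
    # ...do unrelated work here (e.g. I/O) while the codes evolve...
    pool.waitall()  # block until every queued request has completed
    return [req.result() for req in requests]
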
Example #2
    def test25(self):
        """ more test of pool: calls of same code """
        from amuse.rfi.async_request import AsyncRequestsPool
        instance1 = ForTesting(self.exefile)

        r1 = instance1.do_sleep(1, return_request=True)
        r2 = instance1.echo_int(2, return_request=True)

        p1 = AsyncRequestsPool()
        r1.wait()
        r2.wait()
        p1.add_request(r1)
        p1.add_request(r2)

        #~ p1=r1.join(r2)

        p1.waitall()

        self.assertEqual(r2.result(), 2)

        instance1.stop()
Example #3
import time

from amuse.rfi.async_request import AsyncRequestsPool
from amuse.units import units

# d1 and d2 are assumed to be two already-instantiated code instances
# (e.g. Dales workers) exposing evolve_model and a 'fields' grid.
t = time.time()
pool = AsyncRequestsPool()

# add requests to the two codes to the pool
request1 = d1.evolve_model.asynchronous(target_time, exactEnd=True)
pool.add_request(request1)

request2 = d2.evolve_model.asynchronous(target_time, exactEnd=True)
pool.add_request(request2)

print('Generating asynchronous requests  %f s' % (time.time() - t))

# wait for the requests to finish
print('Calling pool.waitall()')
t = time.time()
pool.waitall()
print('pool.waitall() returned %f s' % (time.time() - t))

# setting grid data

# normal synchronous call
t = time.time()
d1.fields[:, :, 3:6].U = 1 | units.m / units.s
print('Synchronous setting %f s' % (time.time() - t))

# asynchronous call
t = time.time()
d2.fields[:, :, 3:6].request.U = 1 | units.m / units.s
print('Asynchronous setting %f s' % (time.time() - t))

# getting grid data
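# The original snippet is truncated here. The following is a sketch only,
# assuming that getting grid data mirrors the setting pattern above:
# reading an attribute through .request issues an asynchronous get whose
# values are retrieved later with .result().

# normal synchronous call
t = time.time()
u1 = d1.fields[:, :, 3:6].U
print('Synchronous getting %f s' % (time.time() - t))

# asynchronous call
t = time.time()
request3 = d2.fields[:, :, 3:6].request.U
print('Asynchronous getting %f s' % (time.time() - t))
u2 = request3.result()  # block until the requested values have arrived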