Example 1
# Imports assumed by this excerpt; other names used below (log, les_models,
# les_queue_threads, async_evolve, les_spinup, spio, step_les, finalize,
# errorFlag) are module-level definitions elsewhere in this file.
import sys
import threading

from amuse.units import units
from amuse.rfi.async_request import AsyncRequestsPool  # location in recent AMUSE releases

def step_les_models(model_time, work_queue, offset=les_spinup):
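    """Advance all LES (DALES) instances to model_time (+ offset seconds).

    Depending on les_queue_threads, the instances are stepped all in parallel
    (through asynchronous AMUSE calls or Python threads), through the worker
    thread queue, or sequentially. Returns the per-instance wall-clock times
    in seconds; the list is only filled by the asynchronous path.
    """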
    global errorFlag
    les_wall_times = []
    if not any(les_models):
        return les_wall_times
    if les_queue_threads >= len(les_models):  # step all DALES models in parallel
        if async_evolve:  # evolve all DALES models with asynchronous AMUSE calls
            reqs = []
            pool = AsyncRequestsPool()
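            # keep requests in submission order so results match les_models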
            for les in les_models:
                # .asynchronous() replaces the old .async attribute, which
                # collides with the async keyword in Python 3.7+
                req = les.evolve_model.asynchronous(
                    model_time + (offset | units.s), exactEnd=True)
                reqs.append(req)
                pool.add_request(req)
            # now while the DALES threads are working, sync the NetCDF to disk
            spio.sync_root()
            # wait for all threads
            pool.waitall()
            try:
                les_wall_times = [r.result().value_in(units.s) for r in reqs]
                log.info("async step_les_models() done. Elapsed times:" +
                         str(['%5.1f' % t for t in les_wall_times]))
            except Exception as e:
                # Exception.message no longer exists in Python 3; format e itself
                log.error("Exception caught while gathering results: %s" % e)

        else:  # evolve all DALES models using Python threads
            threads = []
            for les in les_models:
                t = threading.Thread(target=step_les,
                                     args=(les, model_time, offset),
                                     name=str(les.grid_index))
                # t.setDaemon(True)
                threads.append(t)
                t.start()
            # now while the DALES threads are working, sync the NetCDF to disk
            spio.sync_root()
            # wait for all threads
            for t in threads:
                # log.info("Waiting to join thread %s..." % t.name)
                t.join()
            # log.info("joined thread %s" % t.name)
    elif les_queue_threads > 1:  # hand work to the pre-started worker threads
        for les in les_models:
            work_queue.put((les, model_time))  # enqueue all DALES instances
        # now while the DALES threads are working, sync the NetCDF to disk
        spio.sync_root()
        work_queue.join()  # wait for all dales work to be completed
        if errorFlag:
            log.info("One thread failed - exiting ...")
            # stop_worker_threads(work_queue): signalling the workers to quit
            # is now done in an atexit function, so it is not needed here
            finalize()
            sys.exit(1)
    else:  # sequential version
        for les in les_models:
            step_les(les, model_time, offset)
    return les_wall_times
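
For context, below is a minimal sketch of how this function might be driven from the coupler's time loop. The names end_time, timestep and the work_queue setup are illustrative assumptions, not part of the original module; in the real code, work_queue is the queue served by worker threads started elsewhere.

import queue

work_queue = queue.Queue()  # only consumed when 1 < les_queue_threads < len(les_models)

model_time = 0.0 | units.s
end_time = 3600.0 | units.s
timestep = 60.0 | units.s
while model_time < end_time:
    model_time += timestep
    wall_times = step_les_models(model_time, work_queue)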