def start_worker(self):
    """Spin up the parallel worker pool and populate each worker with this
    algorithm's environment and policy.

    When more than one worker is configured, (re)initializes the singleton
    pool and runs the TensorFlow per-worker init hook on every member.
    The env/policy are then pushed to the workers' sampler tasks.
    """
    if singleton_pool.n_parallel > 1:
        singleton_pool.initialize(singleton_pool.n_parallel)
        singleton_pool.run_each(worker_init_tf)
    # NOTE(review): 'parrallel_sampler' looks like a typo for
    # 'parallel_sampler' — confirm the actual imported module name before
    # renaming; left as-is to avoid breaking a working reference.
    parrallel_sampler.populate_task(self.algo.env, self.algo.policy)
    print('done')
def train(self, n_parallel=4):
    """Run training across a pool of parallel worker processes.

    Initializes the singleton pool with ``n_parallel`` workers, runs the
    per-worker init hook, ships a pickled copy of ``self`` to every worker,
    and launches ``train_thread`` in each with a shared counter and lock.

    Args:
        n_parallel: number of worker processes (default 4, matching the
            previously hard-coded value — backward compatible).
    """
    singleton_pool.initialize(n_parallel=n_parallel)
    # 'worker_id' rather than 'id' — avoids shadowing the builtin.
    singleton_pool.run_each(
        _worker_init, [(worker_id,) for worker_id in range(n_parallel)])
    singleton_pool.run_each(
        _worker_populate_task, [(pickle.dumps(self), None)] * n_parallel)
    manager = mp.Manager()
    # Shared integer counter and re-entrant lock coordinate the workers.
    counter = manager.Value('i', 0)
    lock = manager.RLock()
    results = singleton_pool.run_each(
        train_thread, [(counter, lock)] * n_parallel)
def initialize(n_parallel):
    """Start the singleton worker pool and run the per-worker init hook,
    passing each worker its integer id."""
    singleton_pool.initialize(n_parallel)
    init_args = [(worker_id,) for worker_id in range(singleton_pool.n_parallel)]
    singleton_pool.run_each(_worker_init, init_args)
def initialize(n_parallel):
    """Start the singleton worker pool and run the per-worker init hook,
    passing each worker its integer id."""
    singleton_pool.initialize(n_parallel)
    # range, not Python-2-only xrange: the surrounding code already uses
    # Python 3 constructs (print() calls, range in the sibling
    # initializers), so xrange would raise NameError at runtime.
    singleton_pool.run_each(
        _worker_init,
        [(worker_id,) for worker_id in range(singleton_pool.n_parallel)])
def initialize(n_parallel):
    """Start the singleton worker pool with ``n_parallel`` workers and run
    the per-worker init hook on every member, passing each its id."""
    # Debug trace of the requested pool size (prints a tuple, as before).
    print(("parallel_sampler:initialize n_parallel", n_parallel))
    singleton_pool.initialize(n_parallel)
    per_worker_args = [(i,) for i in range(singleton_pool.n_parallel)]
    singleton_pool.run_each(_worker_init, per_worker_args)