def start_worker(self):
    """Initialize the sampler worker pool and push the task to it.

    When more than one parallel worker is configured, the singleton pool
    is (re)initialized and each worker runs its TensorFlow init hook.
    The algorithm's env/policy pair is then broadcast to the workers.
    """
    pool = singleton_pool
    if pool.n_parallel > 1:
        pool.initialize(pool.n_parallel)
        pool.run_each(worker_init_tf)
    # NOTE(review): 'parrallel_sampler' spelling presumably matches the
    # module name defined elsewhere in this project — confirm before renaming.
    parrallel_sampler.populate_task(self.algo.env, self.algo.policy)
    print('done')
Example #2
0
    def train(self):
        """Run training across a fixed pool of 4 parallel workers.

        Each worker is initialized with its worker id, receives a pickled
        copy of this trainer object, and then executes ``train_thread``
        under a shared counter/lock pair from a multiprocessing Manager.
        """
        n_workers = 4  # hoisted: was a literal repeated on every call below
        singleton_pool.initialize(n_parallel=n_workers)
        # `worker_id` avoids shadowing the builtin `id`
        singleton_pool.run_each(
            _worker_init, [(worker_id,) for worker_id in range(n_workers)])
        singleton_pool.run_each(_worker_populate_task,
                                [(pickle.dumps(self), None)] * n_workers)
        manager = mp.Manager()
        counter = manager.Value('i', 0)

        lock = manager.RLock()
        # NOTE(review): `results` appears unused in the visible snippet —
        # presumably consumed by code past this excerpt; kept as-is.
        results = singleton_pool.run_each(
            train_thread, [(counter, lock)] * n_workers)
Example #3
0
def initialize(n_parallel):
    """Initialize the singleton worker pool with ``n_parallel`` workers.

    Each worker runs ``_worker_init`` and is handed its own worker id.
    """
    singleton_pool.initialize(n_parallel)
    # `worker_id` avoids shadowing the builtin `id`
    singleton_pool.run_each(
        _worker_init,
        [(worker_id,) for worker_id in range(singleton_pool.n_parallel)])
Example #4
0
def initialize(n_parallel):
    """Initialize the singleton worker pool with ``n_parallel`` workers.

    Each worker runs ``_worker_init`` and is handed its own worker id.
    """
    singleton_pool.initialize(n_parallel)
    # `range` replaces Python-2-only `xrange`; `worker_id` avoids
    # shadowing the builtin `id`
    singleton_pool.run_each(
        _worker_init,
        [(worker_id,) for worker_id in range(singleton_pool.n_parallel)])
Example #5
0
def initialize(n_parallel):
    """Initialize the singleton worker pool with ``n_parallel`` workers.

    Logs the requested parallelism, then runs ``_worker_init`` on each
    worker, handing it its own worker id.
    """
    # Fixed 2to3 artifact: print((...)) printed a tuple repr instead of
    # a space-separated message.
    print("parallel_sampler:initialize n_parallel", n_parallel)
    singleton_pool.initialize(n_parallel)
    # `worker_id` avoids shadowing the builtin `id`
    singleton_pool.run_each(
        _worker_init,
        [(worker_id,) for worker_id in range(singleton_pool.n_parallel)])