def main(args):
    """Build the global and per-thread A3C networks and run worker threads.

    Creates one global parameter network pinned to the CPU plus one actor
    network per worker thread, ties each actor to the global net, launches
    a Worker thread per actor, and blocks until every worker finishes.

    Args:
        args: parsed CLI namespace; reads .gpu, .game, .threads,
              .checkpoint_dir.
    """
    logging.info(args)

    device = 'gpu' if args.gpu else 'cpu'
    devices = device_lib.list_local_devices()
    num_gpus = len([d for d in devices if '/gpu' in d.name])

    # Probe the environment once just to discover the action-space size.
    env = gym.make(args.game)
    env = Env(env, resized_width=84, resized_height=84, agent_history_length=4)
    num_actions = len(env.gym_actions)

    # The shared (global) parameters always live on the CPU (index -1).
    global_net = Network(num_actions, -1, 'cpu')

    actor_networks = []
    for t in range(args.threads):
        # BUG FIX: the original compared strings with "is" (identity), which
        # is implementation-dependent; use "==" for value equality.
        # Spread actors across GPUs only when there is one GPU per thread;
        # otherwise everything shares device index 0.
        device_index = 0 if device == 'cpu' else (t if args.threads <= num_gpus else 0)
        n = Network(num_actions, t, device, device_index)
        n.tie_global_net(global_net)
        actor_networks.append(n)

    sess = tf.Session(config=tf.ConfigProto(
        intra_op_parallelism_threads=args.threads,
        inter_op_parallelism_threads=args.threads))
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    # One fresh environment per worker; each worker trains its own actor net.
    threads = []
    for t, net in enumerate(actor_networks):
        e = Env(gym.make(args.game), net.width, net.height, net.depth)
        w = Worker(t, e, net, sess, saver, args.checkpoint_dir)
        w.start()
        threads.append(w)

    # Block until every worker thread has finished.
    for t in threads:
        t.join()
def createWorker(self, clientSocket, address):
    """Spawn a Worker thread to service one accepted client connection.

    The worker gets the client socket, the peer address, the shared DB
    session factory, and this server's SSL context; it runs detached —
    no handle to it is retained here.
    """
    handler = Worker(clientSocket, address, self.db.Session,
                     sslContext=self.sslContext)
    handler.start()
def run(args_obj):
    """Start the worker and message-manager threads, then wait for both.

    The message manager is handed a reference to the worker thread so it
    can route messages to it. Both threads are started (manager first)
    and joined before this function returns.
    """
    worker_thread = Worker(args_obj=args_obj)
    manager_thread = MSGManager(worker_thread)

    manager_thread.start()
    worker_thread.start()

    worker_thread.join()
    manager_thread.join()
def runWorker(mapURL, threads):
    """Process every URL in *mapURL* with a pool of worker threads.

    Spawns *threads* daemon Worker threads that consume from a shared
    queue, enqueues every URL value, and blocks until the queue reports
    all items processed.

    Args:
        mapURL: mapping whose values are the URLs to enqueue.
        threads: number of worker threads to spawn.

    Note: queue.join() only returns if each Worker calls task_done()
    for every item it takes — presumably Worker does; verify in its run().
    """
    url_queue = Queue.Queue()

    # Daemon threads so a hung worker cannot keep the process alive
    # after the main thread exits.
    for i in range(threads):
        worker = Worker(url_queue, str(i))
        # FIX: Thread.setDaemon() is deprecated; assign the attribute.
        worker.daemon = True
        worker.start()

    # Feed the pool.
    for url in mapURL.values():
        url_queue.put(url)

    # Block until every enqueued item has been marked done by a worker.
    url_queue.join()