def RUN(systemstyle):
    """Collect all point data for one system style, run it through the
    web-check worker pool, and persist the results as a pickle file.

    Parameters:
        systemstyle: hashable key identifying the target system; used to
            look up credentials/queries in the surrounding SQLPWD / SQL /
            dcpoints / revisepoints mappings.

    Returns:
        The list of per-point results produced by the worker pool (also
        written to '<systemstyle>.pickle' on disk).
    """
    # Optional per-system overrides. Each mapping may itself be None/empty;
    # a missing key simply leaves the override as None (original code did
    # the same with nested membership checks and double lookups).
    SQL_poc = SQL.get(systemstyle) if SQL else None
    dcpoints_poc = dcpoints.get(systemstyle) if dcpoints else None
    revisepoints_poc = revisepoints.get(systemstyle) if revisepoints else None

    con = OracleConn.Oracle_Connect(*SQLPWD[systemstyle], systemtag=systemstyle, log=log)
    data = con.PointAll(dcpoints_poc, revisepoints_poc, SQL_poc)

    dtxy_out = WebCheck.DTXY(systemstyle, log=log)
    dtxy_out.CheckWebStatus()

    pooloracle = ThreadPool(multi)
    poolgetdat = PoolGetDat(dtxy_out)
    oracle_res = pooloracle.map(poolgetdat, data)
    # ThreadPool may be either gevent's pool or multiprocessing's ThreadPool
    # depending on how it was imported; only the multiprocessing flavour
    # needs an explicit close()/join() here.
    if str(type(pooloracle)).find('multiprocessing.pool.ThreadPool') > -1:
        pooloracle.close()
        pooloracle.join()

    # Cache the results next to this module when its directory exists,
    # otherwise fall back to the current working directory.
    filename = '{}.pickle'.format(systemstyle)
    if os.path.isdir(os.path.dirname(__file__)):
        thefilename = os.path.join(os.path.dirname(__file__), filename)
    else:
        thefilename = os.path.join(os.getcwd(), filename)
    with open(thefilename, 'wb') as f:
        pickle.dump(oracle_res, f)
    return oracle_res
def __init__(self, worker_fn, worker_number=2, queue_size=2048, queue_timeout=30):
    """Start a pool of workers that drain a bounded task queue.

    Each queued item is a dict {'ar': <async-result>, 'data': <payload>};
    a worker calls worker_fn(ar, data) and, if that raises, resolves the
    async result with None and stashes the formatted traceback on it so
    waiters never hang.

    Parameters:
        worker_fn: callable(ar, data) performing the actual work.
        worker_number: number of worker tasks to spawn.
        queue_size: maximum queued items (backpressure bound).
        queue_timeout: seconds a worker waits for a new item before exiting.
    """
    self.Q = Queue(maxsize=queue_size)
    self.pool = ThreadPool(worker_number)

    def fn():
        # Worker loop: process items until the queue stays empty for
        # queue_timeout seconds, then quit.
        while True:
            try:
                item = self.Q.get(timeout=queue_timeout)
            except QueueEmpty:
                print('async queue timeout. quit')
                break
            ar = item['ar']
            data = item['data']
            try:
                worker_fn(ar, data)
            except Exception:
                # Was a bare `except:` — that also caught SystemExit /
                # KeyboardInterrupt and wrongly resolved the result.
                # Narrowed to Exception; failure still resolves ar with
                # None and keeps the traceback for later inspection.
                ar.set(None)
                ar._traceback_info = traceback.format_exc()

    for _ in range(worker_number):
        self.pool.spawn(fn)
def PoolWebCon(SQLPWD, SQL, dcpoints, revisepoints, log=None):
    """Run the Oracle/web check for every configured system style in parallel.

    Parameters:
        SQLPWD: mapping of system style -> Oracle connection arguments;
            its keys define the set of styles to process.
        SQL, dcpoints, revisepoints: optional per-style mappings forwarded
            to PoolOracleCon.
        log: optional logger forwarded to PoolOracleCon.

    Returns:
        List of results, one per system style, as produced by poolweb.map.
    """
    systemstyles = list(SQLPWD.keys())
    len_multi = len(systemstyles)
    # Spread the spare CPUs over the per-style sub-pools, with a floor of 4.
    # Guard the divisor: the original divided by (len_multi - 1), which
    # raised ZeroDivisionError whenever exactly one style was configured.
    divisor = max(len_multi - 1, 1)
    len_submulti = int((psutil.cpu_count() - 2) / divisor)
    if len_submulti < 4:
        len_submulti = 4
    poolweb = ThreadPool(len_submulti)
    pooloraclecon = PoolOracleCon(SQLPWD, SQL, dcpoints, revisepoints, len_submulti, log=log)
    web_res = poolweb.map(pooloraclecon, systemstyles)
    # ThreadPool may be gevent's pool or multiprocessing's ThreadPool; only
    # the multiprocessing flavour needs an explicit close()/join().
    if str(type(poolweb)).find('multiprocessing.pool.ThreadPool') > -1:
        poolweb.close()
        poolweb.join()
    return web_res
def download_images(image_urls):
    """Fetch every image referenced by *image_urls* into OUTPUT_DIR.

    Each URL is a page whose body is scanned with DOWNLOAD_URL_RE for the
    real image location; files already present on disk are skipped.
    Downloads run on a 4-worker pool.
    """
    def fetch_one(page_url):
        session = requests.session()
        page = get_url(page_url, session)
        match = re.search(DOWNLOAD_URL_RE, page)
        if match is None:
            # Page carries no downloadable image reference.
            return
        image_url = match.group(1).replace('\\', '')
        target = os.path.join(OUTPUT_DIR, page_url.split('/')[-1] + '.jpg')
        if os.path.exists(target):
            # Already downloaded — skip.
            return
        response = session.get(image_url)
        print('download image %s' % image_url)
        with open(target, 'wb') as out:
            out.write(response.content)

    pool = ThreadPool(4)
    for page_url in image_urls:
        pool.spawn(fetch_one, page_url)
    pool.join()
def run(bind_address, _worker_idx):
    """Load-test worker: open batch_round * batch_size socket.io connections
    from a fixed local bind address, wait until every connection has received
    its fanout message, then tear the connections down batch by batch.

    Parameters:
        bind_address: local IP to bind outgoing sockets to (forces the
            source address of every connection made by this worker).
        _worker_idx: index of this worker, used only for log labelling.

    Returns:
        A human-readable summary string '<worker_name>: <state>'.

    NOTE(review): relies on module-level host / port / args / batch_round /
    batch_size / total_conn_number / logger, and on Socket / SocketIO /
    FanoutNamespace / GlobalState / ThreadPool defined elsewhere — confirm
    against the full file.
    """
    worker_name = '(bind = {}, w = {})'.format(bind_address, _worker_idx)
    # Monkey-patch Socket.connect so every outgoing socket is first bound to
    # bind_address (with an ephemeral port) before connecting.
    _socket_connect = Socket.connect

    def my_socket_connect(self: Socket, address):
        # logger.warning('socket {} bind to {}'.format(self, args.bind))
        self.bind((bind_address, 0))
        return _socket_connect(self, address)

    Socket.connect = my_socket_connect

    pool = ThreadPool()
    socks = []
    state = GlobalState()

    class BindFanoutNamespace(FanoutNamespace):
        # Namespace subclass that shares this worker's GlobalState instance.
        def __init__(self, io, path):
            super(BindFanoutNamespace, self).__init__(state, io, path)

    # Ramp up: open batch_size connections per round and block until the
    # shared state confirms they are all established.
    for rnd in range(batch_round):
        logger.warning('{}: round #{} start'.format(worker_name, rnd))
        exp_conn_number = (rnd + 1) * batch_size
        for i in range(batch_size):
            # NOTE: this approach might reduce the number of connections,
            # since the default connects to '/'. But not working!!!
            # sock = SocketIO(host=host, port=port, Namespace=FanoutNamespace)
            # NOTE: later I found this does work, but you must explicitly
            # call connect(path='/')
            sock = SocketIO(host=host, port=port)
            sock.define(BindFanoutNamespace, args.namespace)
            socks.append(sock)
            pool.spawn(sock.wait)
        # Poll until this round's connections are all up.
        while state.current_conn_number != exp_conn_number:
            logger.warning('{}: current_conn = {}, exp_conn = {}'.format(
                worker_name, state.current_conn_number, exp_conn_number))
            time.sleep(5)
        logger.warning('{}: round #{} ok. current_conn = {}'.format(
            worker_name, rnd, state.current_conn_number))

    # Hold until every expected connection has received its message.
    while state.rcv_msg_number != total_conn_number:
        logger.warning('{}: rcv_msg = {}, total_conn = {}'.format(
            worker_name, state.rcv_msg_number, total_conn_number))
        time.sleep(2)

    # Ramp down: disconnect batch by batch (namespace first, then the base
    # connection) and wait for the connection count to drop accordingly.
    for rnd in range(batch_round):
        logger.warning('{}: round #{} quit'.format(worker_name, rnd))
        exp_conn_number = (batch_round - rnd - 1) * batch_size
        for s in socks[rnd * batch_size:(rnd + 1) * batch_size]:
            s.disconnect(path=args.namespace)
            s.disconnect()
        while state.current_conn_number != exp_conn_number:
            logger.warning('{}: current_conn = {}, exp_conn = {}'.format(
                worker_name, state.current_conn_number, exp_conn_number))
            time.sleep(5)

    # for s in socks:
    #     s._close()

    # Give the pool a bounded chance to drain, then kill any stragglers.
    empty = pool.join(timeout=10, raise_error=False)
    if not empty:
        logger.warning('{} join timeout. kill it'.format(worker_name))
        pool.kill()
    logger.warning('{} QUIT !!!'.format(worker_name))
    # return state
    return '{}: {}'.format(worker_name, str(state))