traceback.print_exc(file=sys.stderr)


def fmt_score(score):
    return f"{'B' if score >= 0 else 'W'}+{abs(score):.1f}"


print(len(ais_to_test), "ais to test")
global_start = time.time()

for n in range(N_GAMES):
    for _, e in AI.ENGINES:  # no caching/replays
        e.shutdown()
    AI.ENGINES = []
    with ThreadPoolExecutor(max_workers=4 * AI.NUM_THREADS) as threadpool:
        for b in ais_to_test:
            for w in ais_to_test:
                if b is not w:
                    threadpool.submit(play_game, b, w)
    print("POOL EXIT")
    print(f"---- RESULTS ({n}) ----")
    for k, v in results.items():
        b_win = sum([s > 0.3 for s in v])
        w_win = sum([s < -0.3 for s in v])
        print(f"{b_win} {k} {w_win} : {list(map(fmt_score, v))}")
    print("---- ELO ----")
    for ai in sorted(ai_database, key=lambda a: -a.elo_comp.rating):
        wins = [(b, w, s) for (b, w, s) in all_results]
def __init__(
    self,
    session_name: Union[str, Storage],
    api_id: Union[int, str] = None,
    api_hash: str = None,
    app_version: str = None,
    device_model: str = None,
    system_version: str = None,
    lang_code: str = None,
    ipv6: bool = False,
    proxy: dict = None,
    test_mode: bool = False,
    bot_token: str = None,
    phone_number: str = None,
    phone_code: str = None,
    password: str = None,
    force_sms: bool = False,
    workers: int = Scaffold.WORKERS,
    workdir: str = Scaffold.WORKDIR,
    config_file: str = Scaffold.CONFIG_FILE,
    plugins: dict = None,
    parse_mode: str = Scaffold.PARSE_MODES[0],
    no_updates: bool = None,
    takeout: bool = None,
    sleep_threshold: int = Session.SLEEP_THRESHOLD,
    hide_password: bool = False
):
    super().__init__()

    self.session_name = session_name
    self.api_id = int(api_id) if api_id else None
    self.api_hash = api_hash
    self.app_version = app_version
    self.device_model = device_model
    self.system_version = system_version
    self.lang_code = lang_code
    self.ipv6 = ipv6
    # TODO: Make code consistent, use underscore for private/protected fields
    self._proxy = proxy
    self.test_mode = test_mode
    self.bot_token = bot_token
    self.phone_number = phone_number
    self.phone_code = phone_code
    self.password = password
    self.force_sms = force_sms
    self.workers = workers
    self.workdir = Path(workdir)
    self.config_file = Path(config_file)
    self.plugins = plugins
    self.parse_mode = parse_mode
    self.no_updates = no_updates
    self.takeout = takeout
    self.sleep_threshold = sleep_threshold
    self.hide_password = hide_password

    self.executor = ThreadPoolExecutor(self.workers, thread_name_prefix="Handler")

    if isinstance(session_name, str):
        if session_name == ":memory:" or len(session_name) >= MemoryStorage.SESSION_STRING_SIZE:
            session_name = re.sub(r"[\n\s]+", "", session_name)
            self.storage = MemoryStorage(session_name)
        else:
            self.storage = FileStorage(session_name, self.workdir)
    elif isinstance(session_name, Storage):
        self.storage = session_name
    else:
        raise ValueError("Unknown storage engine")

    self.dispatcher = Dispatcher(self)
    self.loop = asyncio.get_event_loop()
def test_parallel_updates():
    # Just checking nothing crashes
    with ThreadPoolExecutor() as executor:
        for i in range(0, 5):
            executor.submit(lit.get_id, "nonsense", 100, retry=True)
import asyncio
from concurrent.futures.thread import ThreadPoolExecutor
from typing import List, Optional

from fastapi import APIRouter

from device_server.bay.station import Station
from device_server.config import config
from device_server.model import BayState

router = APIRouter()
station: Optional[Station] = None
thread_executor = ThreadPoolExecutor(1, thread_name_prefix="hardware_thread_")


@router.on_event('startup')
async def bay_startup():
    global station
    assert station is None, "Already initialized"
    station = Station(config.station)
    await asyncio.get_running_loop().run_in_executor(thread_executor, station.configure)


@router.on_event('shutdown')
async def bay_shutdown():
    global station
    assert station is not None, "Not initialized"
    station = None
def lineReceived(self, line: bytes):
    def send_message(user):
        if user is not self and user.login is not None:
            user.sendLine(content.encode())

    # try:
    content = line.decode(errors="ignore")
    # except UnicodeDecodeError:
    #     self.sendLine("Sorry, but I don't understand you. This language/locale is not an option. In English, "
    #                   "please! ".encode())
    #     return

    if self.is_often_messaging():
        self.sendLine("Not so fast. All your base are belong to us…".encode())
        self.check_is_it_tea_time()
        return

    if self.login is not None:
        content = content.strip()
        if len(content) < 1:
            return
        if content.lower() == "die":
            self.transport.loseConnection()
            content = f"{self.login} has left this world."
            with ThreadPoolExecutor(max_workers=20) as pool:
                pool.map(send_message, self.factory.clients)
            return
        content = f"{self.login} said: {content}"
        self.factory.update_history(content)
        if self.__debug:
            print(content)
        with ThreadPoolExecutor(max_workers=20) as pool:
            pool.map(send_message, self.factory.clients)
        # for user in self.factory.clients:
        #     if user is not self and user.login is not None:
        #         user.sendLine(content.encode())
    else:
        if self.__debug:
            print(f"Anonymous said '{content}'")
        # login:admin -> admin
        if not content.startswith("login:"):
            self.sendLine("Please, enter your login first like \"login:YOUR_LOGIN_HERE\"".encode())
        else:
            login = content.replace("login:", "")
            login_unique = True
            if login.lower() in self.factory.restricted_logins:
                self.sendLine("Sorry. Not this time.".encode())
                self.transport.loseConnection()
                return
            else:
                for client in self.factory.clients:
                    if client.login == login:
                        login_unique = False
                        break
            if not login_unique:
                self.sendLine(f"Login {login} is taken, try another one".encode())
            else:
                self.login = login
                self.sendLine(f"Welcome, {login}! Let's chat! ;)".encode())
                self.send_history()
                content = f"{self.login} joined us!"
                with ThreadPoolExecutor(max_workers=20) as pool:
                    pool.map(send_message, self.factory.clients)
                return
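# Note: the handler above creates a fresh 20-worker pool for every broadcast,
# paying pool startup/teardown cost on each message. A minimal sketch of the
# alternative, reusing one module-level pool (the names here are illustrative,
# not from the source snippet):
from concurrent.futures import ThreadPoolExecutor

BROADCAST_POOL = ThreadPoolExecutor(max_workers=20, thread_name_prefix="broadcast")


def broadcast(clients, send_message):
    # list() drains the lazy map iterator so every send runs (and any
    # exception surfaces here) before returning.
    list(BROADCAST_POOL.map(send_message, clients))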
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from time import time


def gcd(pair):
    # Brute-force greatest common divisor: count down from min(a, b).
    a, b = pair
    low = min(a, b)
    for i in range(low, 0, -1):
        if a % i == 0 and b % i == 0:
            return i


numbers = [(341312323, 56123136), (341212232, 81231236),
           (12532367, 766532134), (12532367, 766532134)]

start = time()
result = list(map(gcd, numbers))
end = time()
print(f"Took {end - start}")

# Running this code across multiple Python threads brings no speedup: because
# of the GIL, the threads cannot use several CPU cores in parallel.
start = time()
pool = ThreadPoolExecutor(max_workers=2)
result = list(pool.map(gcd, numbers))
end = time()
print(f"Took {end - start}")

# The result above is actually slower, due to the overhead of starting the
# thread pool and communicating with it.
start = time()
pool = ProcessPoolExecutor(max_workers=2)
result = list(pool.map(gcd, numbers))
end = time()
print(f"Took {end - start}")

# What the ProcessPoolExecutor class actually does (via the low-level
# machinery the multiprocessing module provides):
# 1. Takes each item from the numbers input data via map.
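# A caveat the snippet above skips: ProcessPoolExecutor workers re-import the
# main module, so on spawn-based platforms (Windows; macOS on recent Pythons)
# the pool must be created under a __main__ guard or the script would spawn
# workers recursively. A minimal sketch, reusing gcd and numbers from above:
if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2) as pool:
        result = list(pool.map(gcd, numbers))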
def __init__(
    self,
    name: str,
    api_id: Union[int, str] = None,
    api_hash: str = None,
    app_version: str = APP_VERSION,
    device_model: str = DEVICE_MODEL,
    system_version: str = SYSTEM_VERSION,
    lang_code: str = LANG_CODE,
    ipv6: bool = False,
    proxy: dict = None,
    test_mode: bool = False,
    bot_token: str = None,
    session_string: str = None,
    in_memory: bool = None,
    phone_number: str = None,
    phone_code: str = None,
    password: str = None,
    workers: int = WORKERS,
    workdir: str = WORKDIR,
    plugins: dict = None,
    parse_mode: "enums.ParseMode" = enums.ParseMode.DEFAULT,
    no_updates: bool = None,
    takeout: bool = None,
    sleep_threshold: int = Session.SLEEP_THRESHOLD,
    hide_password: bool = False
):
    super().__init__()

    self.name = name
    self.api_id = int(api_id) if api_id else None
    self.api_hash = api_hash
    self.app_version = app_version
    self.device_model = device_model
    self.system_version = system_version
    self.lang_code = lang_code
    self.ipv6 = ipv6
    self.proxy = proxy
    self.test_mode = test_mode
    self.bot_token = bot_token
    self.session_string = session_string
    self.in_memory = in_memory
    self.phone_number = phone_number
    self.phone_code = phone_code
    self.password = password
    self.workers = workers
    self.workdir = Path(workdir)
    self.plugins = plugins
    self.parse_mode = parse_mode
    self.no_updates = no_updates
    self.takeout = takeout
    self.sleep_threshold = sleep_threshold
    self.hide_password = hide_password

    self.executor = ThreadPoolExecutor(self.workers, thread_name_prefix="Handler")

    if self.session_string:
        self.storage = MemoryStorage(self.name, self.session_string)
    elif self.in_memory:
        self.storage = MemoryStorage(self.name)
    else:
        self.storage = FileStorage(self.name, self.workdir)

    self.dispatcher = Dispatcher(self)

    self.rnd_id = MsgId
    self.parser = Parser(self)

    self.session = None
    self.media_sessions = {}
    self.media_sessions_lock = asyncio.Lock()

    self.is_connected = None
    self.is_initialized = None

    self.takeout_id = None

    self.disconnect_handler = None

    # Username used for mentioned bot commands, e.g.: /start@usernamebot
    self.username = None

    self.message_cache = Cache(10000)

    self.loop = asyncio.get_event_loop()
import json
import math
import re
import os
import time
from concurrent.futures.thread import ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path

from dataclasses_json import dataclass_json

from common import create_template, build_synonyms

version = int(round(time.time() * 1000))

Path("result/gwent/html").mkdir(parents=True, exist_ok=True)
Path("result/gwent/images").mkdir(parents=True, exist_ok=True)

executor = ThreadPoolExecutor(max_workers=8)


@dataclass_json
@dataclass
class CardData:
    id: str
    name: str
    category: str
    faction: str
    power: str
    armor: str
    provision: str
    color: str
    type: str
    rarity: str
def handle(self, *args, **options):
    with ThreadPoolExecutor(max_workers=10) as executor:
        # Pass the callable, not update(): map() calls the function once per
        # item of the iterable argument (the iterable is not shown here).
        executor.map(update, )
def stop_orphaned_threads():
    # make sure we shut down any orphaned threads and create a new Executor for each test
    MySQLStatementSamples.executor.shutdown(wait=True)
    MySQLStatementSamples.executor = ThreadPoolExecutor()
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

import requests

URL = "https://example.com"  # placeholder: the target URL is not shown in the source


def do_req(n):
    # The request call itself is reconstructed; only the error/success
    # handling below appears in the source snippet.
    try:
        r = requests.get(URL)
    except requests.RequestException:
        return None
    else:
        print(f'task {n} done')
        return r.status_code
        # print(r.status_code)


def my_print(s):
    s = str(s)
    x = (78 - len(s)) // 2
    print('=' * x + ' ' + s + ' ' + '=' * x)


if __name__ == "__main__":
    my_print('connection start')
    start_time = time.time()
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        # futures = executor.map(do_req, range(50))
        futures = [executor.submit(do_req, i) for i in range(20)]
        # print([f for f in as_completed(futures)])
        for i, f in enumerate(as_completed(futures)):
            print(f'task {i} running: {f.running()}')
    print(f'total time: {time.time() - start_time}')
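# Note: enumerate(as_completed(futures)) above numbers futures by completion
# order, not submission order, so "task {i}" need not match the n passed to
# do_req. A small sketch that recovers the original task id, assuming the same
# do_req as above:
with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
    # Map each future back to the task id it was submitted with.
    future_to_n = {executor.submit(do_req, n): n for n in range(20)}
    for f in as_completed(future_to_n):
        print(f'task {future_to_n[f]} finished with {f.result()}')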
def start(self):
    with ThreadPoolExecutor(max_workers=self.workers) as executor:
        executor.map(self.crawl, range(self.workers))
def __init__(self, bot):
    self.bot = bot
    self.executor = ThreadPoolExecutor()
    self.player = None
    self.is_pycraft_instance = False
import os
import queue
import time
from importlib import import_module

import cv2
from flask import Blueprint

import dummy_threading as _threading

if os.environ.get('CAMERA'):
    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
    from App.BaseCamera import BaseCamera

q = queue.Queue()
blue2 = Blueprint('blue2', __name__)
video = cv2.VideoCapture(0)
# model-data image directory, cf. os.listdir(os.path.abspath("../"))
filepath = "D:\\mypro\\pycharworkspace\\Flask\\"
# load the cascade classifier file
face_detector = cv2.CascadeClassifier(
    r"E:\PYTHON3.7\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml"
)
face_thread = _threading.Thread(target=print_time)
thread_pool = ThreadPoolExecutor(2)


class VideoCamera(BaseCamera):
    @staticmethod
    def frames():
        count = 0
        while True:
            success, image = video.read()
            # detection runs on the grayscale image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = face_detector.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0))
            count += 1
            if count <= 1:
                start = time.time()
def scan(self):
    with ThreadPoolExecutor(max_workers=WORKERS) as executor:
        for path, subdirs, filenames in os.walk(self._root_dir):
            for filename in filenames:
                filepath = os.path.join(path, filename)
                executor.submit(self._calc_hash, filepath)
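# _calc_hash is not shown in the source; a hypothetical sketch of what such a
# per-file worker might look like (chunked SHA-256, so large files are never
# read into memory whole):
import hashlib


def _calc_hash(self, filepath, chunk_size=1 << 20):
    # Runs on a pool worker thread; streams the file in 1 MiB chunks.
    digest = hashlib.sha256()
    try:
        with open(filepath, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                digest.update(chunk)
    except OSError:
        return None  # unreadable file: skip it
    return filepath, digest.hexdigest()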
# button.move(0, 0)
# button.resize(100, 100)
# window.layout().addWidget(button)

app = QApplication([])
window = QMainWindow()
window.resize(1000, 1000)

button = QPushButton()
button.move(100, 0)
button.resize(100, 100)
window.layout().addWidget(button)

signal = Signal()
signal.connect(lambda data: signal_function(data))

window.show()

executor = ThreadPoolExecutor(2)
executor.submit(lambda: infinite_server())

app.exec()
def queueThread(taskQueue, errorQueue, tcnt=4):
    print('threads : ' + str(tcnt))
    with ThreadPoolExecutor(tcnt) as pool:
        for i in range(1, tcnt + 1):
            pool.submit(runThread, i, taskQueue, errorQueue)
def mergeResults(self, outfile, merge_input_mode="xywh", is_nms=True, nms_thresh=0.9, nms_name="nms"):
    """
    :param is_nms: do non-maximum suppression after the merge
    :param nms_thresh: non-maximum suppression IoU threshold
    :return:
    """
    print('Loading source annotation json file: {}'.format(self.srcannopath))
    with open(self.srcannopath, 'r') as load_f:
        srcanno = json.load(load_f)
    mergedresults = defaultdict(list)
    for (filename, objlist) in self.results.items():
        # e.g. "15_Nanshani_Park_IMG_15_04___0.5__4224__6144.jpg" splits into
        # srcfile "15_Nanshani_Park_IMG_15_04" and paras "0.5__4224__6144.jpg"
        srcfile, paras = filename.split('___')
        srcfile = srcfile.replace('_IMG', '/IMG') + self.imgext
        srcimageid = srcanno[srcfile]['image id']
        scale, left, up = paras.replace(self.imgext, '').split('__')
        if not iskeep_dets:
            for objdict in objlist:
                mergedresults[srcimageid].append([
                    *recttransfer(objdict['bbox'], float(scale), int(left), int(up), merge_input_mode),
                    objdict['score'], objdict['category_id'], objdict['image_id']
                ])
        if iskeep_dets:
            keep_input = [[srcimageid, int(left), int(up), float(scale),
                           i['bbox'][0], i['bbox'][1], i['bbox'][2], i['bbox'][3],
                           i['score'], i['image_id'], i['category_id']] for i in objlist]
            print(f"before keep {len(keep_input)}")
            if 391 <= srcimageid <= 420:  # 14 OCT Habour, UP_boundary
                keep_dets, _keep_values = self.keep_dets(np.array(keep_input), UP_boundary[0], PANDA_TEST_SIZE[0])
            if 421 <= srcimageid <= 450:  # 15 Nanshan Park
                keep_dets, _keep_values = self.keep_dets(np.array(keep_input), UP_boundary[1], PANDA_TEST_SIZE[1])
            if 451 <= srcimageid <= 465:  # 16 school, part 01
                keep_dets, _keep_values = self.keep_dets(np.array(keep_input), UP_boundary[2], PANDA_TEST_SIZE[2])
            if 466 <= srcimageid <= 480:  # 16 school, part 02
                keep_dets, _keep_values = self.keep_dets(np.array(keep_input), UP_boundary[3], PANDA_TEST_SIZE[2])
            if 481 <= srcimageid <= 510:  # 17 New Zhongguan
                keep_dets, _keep_values = self.keep_dets(np.array(keep_input), UP_boundary[4], PANDA_TEST_SIZE[3])
            if 511 <= srcimageid <= 540:  # 18 Xili, part 01
                keep_dets, _keep_values = self.keep_dets(np.array(keep_input), UP_boundary[5], PANDA_TEST_SIZE[4])
            if 541 <= srcimageid <= 555:  # 18 Xili, part 02
                keep_dets, _keep_values = self.keep_dets(np.array(keep_input), UP_boundary[6], PANDA_TEST_SIZE[5])
            print(f"after keep {len(keep_dets)}")
            for objdict, keep_value in zip(keep_dets, _keep_values):
                left, up, scale = keep_value[0], keep_value[1], keep_value[2]
                mergedresults[srcimageid].append([
                    *recttransfer(objdict['bbox'], float(scale), int(left), int(up), merge_input_mode),
                    objdict['score'], objdict['category_id'], objdict['image_id']
                ])

    img_size = {}
    for anno in srcanno:
        img_size[srcanno[anno]['image id']] = srcanno[anno]['image size']

    if is_nms:
        if nms_name == "nms":
            for (imageid, objlist) in mergedresults.items():
                masxlist = [i[2] * i[3] for i in objlist]
                max_wh = np.max(masxlist)
                objlist = [[i[0], i[1], i[2], i[3], i[4] * 0.05 + i[3] * i[2] * 0.95 / max_wh, i[5], i[6]]
                           for i in objlist]
                keep = py_cpu_nms(np.array(objlist), nms_thresh)
                outdets = []
                for index in keep:
                    outdets.append(objlist[index])
                mergedresults[imageid] = outdets
        if nms_name == "setnms":
            for (imageid, objlist) in mergedresults.items():
                # element layout: [x1, y1, x2, y2, score, category_id, image_id]
                print("input nms element", objlist[0])
                print(f"before setnms {nms_thresh} ", len(objlist))
                keep = np.array(objlist)[set_cpu_nms(np.array(objlist), nms_thresh)].tolist()
                print(f"{imageid} after setnms_{nms_thresh} ", len(keep))
                mergedresults[imageid] = keep
        if nms_name == "emnms":
            for (imageid, objlist) in mergedresults.items():
                size_anno = img_size[imageid]
                boxes = [[obj[0] / size_anno['width'], obj[1] / size_anno['height'],
                          obj[2] / size_anno['width'], obj[3] / size_anno['height']] for obj in objlist]
                scores = [obj[4] for obj in objlist]
                labels = [obj[5] for obj in objlist]
                boxes, scores, labels = nms([boxes], [scores], [labels])
                boxes[:, [0, 2]] *= size_anno['width']
                boxes[:, [1, 3]] *= size_anno['height']
                outdets = [x[0] + [x[1], x[2]] for x in zip(boxes.tolist(), scores.tolist(), labels.tolist())]
                mergedresults[imageid] = outdets
        if nms_name == "softnms":
            for (imageid, objlist) in mergedresults.items():
                print(f"{imageid} before softnms_{nms_thresh} ", len(objlist))
                masxlist = [i[2] * i[3] for i in objlist]
                max_wh = np.max(masxlist)
                objlist = [[i[0], i[1], i[2], i[3], i[4] * 0.5 + i[3] * i[2] * 0.5 / max_wh, i[5], i[6]]
                           for i in objlist]
                newdets, keep = soft_nms(np.array(objlist), iou_thr=nms_thresh,
                                         method='linear', sigma=0.5, min_score=1e-3)  # method: 'linear' or 'gaussian'
                outdets = []
                for index in keep:
                    outdets.append(objlist[index])
                print(f"{imageid} after softnms_{nms_thresh} ", len(keep))
                mergedresults[imageid] = outdets

    savelist = []

    def say2(iss):
        imageid, objlist = iss[0], iss[1]
        templist = []
        for obj in objlist:  # obj e.g. [22528, 1270, 24576, 1, 1.0, 4]
            templist.append({
                "image_id": imageid,
                "category_id": obj[5],
                "bbox": tlbr2tlwh(obj[:4]),
                "score": obj[4]
            })
        if test:
            print(f"filter before len {len(templist)}")
        if isfliter:
            if 391 <= imageid <= 420:  # 14 OCT Habour
                templist = fliter(templist, fliterscore["14_OCT"],
                                  AnotPath="/root/data/gvision/dataset/xml/14_OCT_Habour.xml",
                                  segma_woh=3, segma_area=3, up_bound=4000,
                                  down_bound=None, down_fs=0.95, yichang=0)
            if 421 <= imageid <= 450:  # 15 Nanshan Park
                templist = fliter(templist, fliterscore["15_nanshan"],
                                  AnotPath="/root/data/gvision/dataset/xml/15_Nanshani_Park.xml",
                                  segma_woh=3, segma_area=2, up_bound=1500,
                                  down_bound=7000, down_fs=None, yichang=0)
            if 451 <= imageid <= 465:  # 16 school, part 01
                templist = fliter(templist, fliterscore["1601_shool"],
                                  AnotPath="/root/data/gvision/dataset/xml/IMG_16_01_head.xml",
                                  segma_woh=3, segma_area=3, up_bound=0,
                                  down_bound=None, down_fs=None, yichang=0)
            if 466 <= imageid <= 480:  # 16 school, part 02
                templist = fliter(templist, fliterscore["1602_shool"],
                                  AnotPath="/root/data/gvision/dataset/xml/IMG_16_25_02_.xml",
                                  segma_woh=3, segma_area=3, up_bound=0,
                                  down_bound=None, down_fs=None, yichang=0)
            if 481 <= imageid <= 510:  # 17 New Zhongguan
                templist = fliter(templist, fliterscore["17_newzhongguan"],
                                  AnotPath="/root/data/gvision/dataset/xml/17_New_Zhongguan.xml",
                                  segma_woh=3, segma_area=3, up_bound=6000,
                                  down_bound=7000, down_fs=None, yichang=0)
            if 511 <= imageid <= 540:  # 18 Xili, part 01
                templist = fliter(templist, fliterscore["1801_xilin"],
                                  AnotPath="/root/data/gvision/dataset/xml/IMG_18_01_01.xml",
                                  segma_woh=3, segma_area=3, up_bound=4000,
                                  down_bound=None, down_fs=None, yichang=0)
            if 541 <= imageid <= 555:  # 18 Xili, part 02
                templist = fliter(templist, fliterscore["1802_xilin"],
                                  AnotPath="/root/data/gvision/dataset/xml/IMG_18_02.xml",
                                  segma_woh=3, segma_area=3, up_bound=4000,
                                  down_bound=None, down_fs=None, yichang=0)
        if isdel_inter:
            templist = del_inter(templist)
        if test:
            print(f"del_inter after len {len(templist)}")
        return templist

    executor = ThreadPoolExecutor(max_workers=80)
    func_var = [[file_name, dict_value] for file_name, dict_value in mergedresults.items()]
    print("merging bboxes into source images: start")
    pbar2 = tqdm(total=len(mergedresults), ncols=50)
    for temp in executor.map(say2, func_var):
        savelist += temp
        pbar2.update(1)
    pbar2.close()
    with open(os.path.join(self.outpath, outfile), 'w', encoding=self.code) as f:
        dict_str = json.dumps(savelist, indent=2)
        f.write(dict_str)
    print(f"save ***results*** json :{os.path.join(self.outpath, outfile)}")
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    show_memo: bool,
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    plot_ids: Set[bytes32] = set()

    if match_str is not None:
        log.info(f'Only loading plots that contain "{match_str}" in the file or directory name')

    def process_file(filename: Path) -> Tuple[int, Dict]:
        new_provers: Dict[Path, PlotInfo] = {}
        nonlocal changed
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            return 0, new_provers
        if filename.exists():
            if filename in failed_to_open_filenames and (time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                return 0, new_provers
            if filename in provers:
                try:
                    stat_info = filename.stat()
                except Exception as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    return 0, new_provers
                if stat_info.st_mtime == provers[filename].time_modified:
                    new_provers[filename] = provers[filename]
                    plot_ids.add(provers[filename].prover.get_id())
                    return stat_info.st_size, new_provers
            try:
                prover = DiskProver(str(filename))

                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = filename.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    return 0, new_provers

                (
                    pool_public_key_or_puzzle_hash,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())

                # Only use plots that have the correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(f"Plot {filename} has a farmer public key that is not in the farmer's pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        return 0, new_provers

                if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                    pool_public_key = pool_public_key_or_puzzle_hash
                    pool_contract_puzzle_hash = None
                else:
                    assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                    pool_public_key = None
                    pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

                if (
                    pool_public_keys is not None
                    and pool_public_key is not None
                    and pool_public_key not in pool_public_keys
                ):
                    log.warning(f"Plot {filename} has a pool public key that is not in the farmer's pool pk list.")
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        return 0, new_provers

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key)

                if prover.get_id() in plot_ids:
                    log.warning(f"Have multiple copies of the plot {filename}, not adding it.")
                    return 0, new_provers

                plot_ids.add(prover.get_id())
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    pool_contract_puzzle_hash,
                    plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )

                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {filename}. {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                return 0, new_provers
            log.info(f"Found plot {filename} of size {new_provers[filename].prover.get_size()}")

            if show_memo:
                plot_memo: bytes32
                if pool_contract_puzzle_hash is None:
                    plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
                else:
                    plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
                plot_memo_str: str = plot_memo.hex()
                log.info(f"Memo: {plot_memo_str}")

            return stat_info.st_size, new_provers
        return 0, new_provers

    def reduce_function(x: Tuple[int, Dict], y: Tuple[int, Dict]) -> Tuple[int, Dict]:
        (total_size1, new_provers1) = x
        (total_size2, new_provers2) = y
        return total_size1 + total_size2, {**new_provers1, **new_provers2}

    with ThreadPoolExecutor() as executor:
        initial_value: Tuple[int, Dict[Path, PlotInfo]] = (0, {})
        total_size, new_provers = reduce(reduce_function, executor.map(process_file, all_filenames), initial_value)

    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds"
    )
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
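# load_plots fans process_file out over a thread pool and folds the per-file
# (size, dict) pairs together with functools.reduce. The same map-reduce shape
# in isolation, with hypothetical worker/combiner names:
from concurrent.futures import ThreadPoolExecutor
from functools import reduce


def process(item):
    # Hypothetical per-item worker: returns (count, partial_dict).
    return 1, {item: item * 2}


def combine(x, y):
    # Sum the counts, merge the dicts.
    return x[0] + y[0], {**x[1], **y[1]}


with ThreadPoolExecutor() as executor:
    total, merged = reduce(combine, executor.map(process, range(5)), (0, {}))
# total == 5; merged == {0: 0, 1: 2, 2: 4, 3: 6, 4: 8}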
def process_auth_request(self, request, http_client, executor=None):
    if executor is None:
        executor = ThreadPoolExecutor(max_workers=1)
    future = executor.submit(self.process_request, request, http_client)
    return future
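# When no executor is passed, the method above builds a fresh one-worker pool
# per call and never shuts it down, so each call leaks a thread that lives
# until interpreter exit. One hedged alternative is to lazily share a default
# pool; the class and attribute names here are invented for illustration:
from concurrent.futures import ThreadPoolExecutor


class AuthProcessor:
    _default_executor = None  # hypothetical shared fallback pool

    def process_auth_request(self, request, http_client, executor=None):
        if executor is None:
            if AuthProcessor._default_executor is None:
                AuthProcessor._default_executor = ThreadPoolExecutor(max_workers=1)
            executor = AuthProcessor._default_executor
        return executor.submit(self.process_request, request, http_client)

    def process_request(self, request, http_client):
        # Stub standing in for the real request processing.
        return request, http_client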
def setUp(self):
    self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")
    self.threadpool = ThreadPoolExecutor()
def start(self, path):
    now = datetime.now()
    self.logger.info("**********%s**********" % now.strftime("%Y-%m-%d %H-%M-%S"))
    wb = load_workbook(path)  # load the excel file
    config = yaml.load(wb["配置文件"].cell(row=1, column=1).value)
    # config = json.load(open("配置文件.json", encoding="utf-8"))

    # API address
    self.url = config.get("网址", None)
    if not self.url:
        self.logger.error("URL in the config sheet is empty, exiting.")
        return
    self.sleepnum = config.get("休眠时间", 2)
    self.ispatch = config.get("补数据", None) == "是"
    self.timeslot = config.get("时间段", [])
    self.prefixname = config.get("自定义扩展表名", "")

    # load area codes
    self.area_code = [
        str(cell.value) for cell in wb["区域代码"]["A"]
        if str(cell.value) != "None"
    ]
    # self.area_code = json.load(
    #     open("区域代码.json", encoding="utf-8")).get("区域代码", None)
    self.logger.debug("area len: {0}, areas: {1}".format(
        len(self.area_code), self.area_code))
    if not self.area_code:
        self.logger.error("Failed to load area codes, please check.")
        return

    # load KPI codes
    self.kpi_code = [
        str(cell.value) for cell in wb["指标代码"]["A"]
        if str(cell.value) != "None"
    ]
    # self.kpi_code = json.load(
    #     open("指标代码.json", encoding="utf-8")).get("指标代码", None)
    self.logger.debug("kpi len: {0}, kpis: {1}".format(
        len(self.kpi_code), self.kpi_code))
    if not self.kpi_code:
        self.logger.error("Failed to load KPI codes, please check.")
        return

    # database connection URL
    databaseurl = config.get("数据库连接地址", None)
    if not databaseurl:
        self.logger.error("No database connection URL found.")
        return

    # number of days to collect
    self.days = config.get("天数", 10)
    if self.ispatch and isinstance(self.timeslot, list) and len(self.timeslot) == 2:
        start = datetime.strptime(self.timeslot[0], "%Y-%m-%d")
        end = datetime.strptime(self.timeslot[1], "%Y-%m-%d")
        if start.year != end.year:
            self.logger.error("The start and end of the time slot must be in the same year.")
            return
        # make sure the earlier date comes first
        if start > end:
            start, end = end, start
    else:
        # the self.days days before today
        start = now + timedelta(days=-self.days)
        end = now + timedelta(days=-1)

    self.comparenum = config.get("比对差量", 100)
    self.threadnum = config.get("线程数", 4)
    self.executor = ThreadPoolExecutor(max_workers=self.threadnum)
    self.logger.info("thread count: %s" % str(self.threadnum))

    _dates = self._getDates(start, end)  # date range
    try:
        engine = create_engine(
            databaseurl,
            pool_size=self.threadnum,
            poolclass=QueuePool,
        )
        Base.metadata.create_all(engine)  # create tables
        self.session = scoped_session(
            sessionmaker(bind=engine, autocommit=False, autoflush=True))
    except Exception as e:
        traceback.print_exc()  # initialization failed
        self.logger.error(str(e))
        return

    # pre-send recipients
    preSheet = wb["预发送"]
    if not preSheet:
        self.logger.warn("Cannot find the pre-send recipients sheet.")
        self.preSend = []
    else:
        self.preSend = [
            str(cell.value) for cell in preSheet["A"]
            if str(cell.value) != "None"
        ]
        self.logger.debug("pre-send recipients: {0}".format(self.preSend))
    self._startTasks(path, _dates)
def __init__(self, max_count=5):
    self.pool = ThreadPoolExecutor(max_count)
    self.futures = []
__license__ = "GNU Lesser General Public License v3 or later (LGPLv3+)" __copyright__ = "Copyright (C) 2017-2021 Dan <https://github.com/delivrance>" from concurrent.futures.thread import ThreadPoolExecutor class StopTransmission(StopAsyncIteration): pass class StopPropagation(StopAsyncIteration): pass class ContinuePropagation(StopAsyncIteration): pass import asyncio from . import raw, types, filters, handlers, emoji from .client import Client from .sync import idle # Save the main thread loop for future references main_event_loop = asyncio.get_event_loop() CRYPTO_EXECUTOR_SIZE_THRESHOLD = 512 crypto_executor = ThreadPoolExecutor(1, thread_name_prefix="CryptoWorker")
# encoding = utf-8
import threading
import time
# create a thread subclass
from concurrent.futures.thread import ThreadPoolExecutor


def moyu_time(threadName, delay, counter):
    while counter:
        time.sleep(delay)
        print("%s starts slacking off %s" % (
            threadName, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        counter -= 1


if __name__ == '__main__':
    pool = ThreadPoolExecutor(20)
    for i in range(1, 5):
        # Pass the function and its arguments separately; calling
        # moyu_time(...) here would run it synchronously instead of
        # submitting it to the pool.
        pool.submit(moyu_time, 'xiaoshuaib' + str(i), 1, 5)
def __init__(self, Id, tname):
    self.logger = initLogger("datas/logs/" + Id, Id)
    # load the fake request headers
    Headers = {
        "Accept": "image/gif, image/jpeg, image/pjpeg, application/x-ms-application, application/xaml+xml, application/x-ms-xbap, application/vnd.ms-excel, application/vnd.ms-powerpoint, application/msword, */*",
        "User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.2)",
        "Content-Type": "application/x-www-form-urlencoded",
        "Connection": "Keep-Alive",
        "Cache-Control": "no-cache",
        "Referer": "",
        "Cookie": "",
    }
    # load the area/KPI codes
    Codes = yaml.load(
        open(os.path.join(ConfigsDir, "codes.txt"), "r", encoding="gbk").read())
    # load the cookies file
    Cookies = yaml.load(
        open(os.path.join(ConfigsDir, "cookies.txt"), "r", encoding="gbk").read())
    # load the referers file
    Referers = yaml.load(
        open(os.path.join(ConfigsDir, "referers.txt"), "r", encoding="gbk").read())
    # load the URLs
    Urls = yaml.load(
        open(os.path.join(ConfigsDir, "urls.txt"), "r", encoding="gbk").read())
    # load the form parameters
    Params = yaml.load(
        open(os.path.join(ConfigsDir, "params.txt"), "r", encoding="gbk").read())
    # load the config
    Config = yaml.load(
        open(os.path.join(ConfigsDir, "config.txt"), "r", encoding="gbk").read())
    # random number
    RandomNum = str(datetime.now().day).zfill(2) + "090230123"
    # step one: fetch the dynamic host
    self.form_bg = "http://{host}/sireports/userdefined_reports/css/ng/nresources/UI/images/form_bg.png"
    self.hosts = Urls.get("hosts", "").split(";")
    self.logger.debug("hosts: %s" % self.hosts)
    # dynamically load the collection modules
    modules = Config.get("modules", {})
    self.logger.debug("modules: %s" % modules)
    self.ModulesCollection = []
    for key, value in modules.items():
        self.logger.info("load module: %s %s" % (key, value))
        Class = __import__("jobs.ReportCollectionJob.modules." + key, fromlist=[key])
        self.logger.debug("Class: %s" % Class)
        self.ModulesCollection.append(
            getattr(Class, key)(self.logger, Config.copy(), Codes, Headers.copy(),
                                Cookies, Referers, RandomNum, Urls, Params, tname))
    # previously the modules were wired explicitly:
    # YxBlCollection, YxCzCollection, CzMxCollection, CpBgCollection

    # multi-threaded executor
    self.executor = ThreadPoolExecutor(max_workers=5)
def _set_executor_pool(self) -> None:
    """Set the thread pool to be used."""
    self._executor_pool = ThreadPoolExecutor(max_workers=len(self._tasks))
def test_without_exception_should_not_raise(self) -> None:
    def _task():
        return

    with ExceptionRaisingExecutor(ThreadPoolExecutor()) as sut:
        sut.submit(_task)
class OrderSpuRecHandler(tornado.web.RequestHandler):
    executor = ThreadPoolExecutor(5)

    def initialize(self, logger, config_result):
        self.__logger = logger
        self.__conf = config_result

    async def get(self):
        # Query MySQL for all goods in the order table matching the criteria
        # (duplicates possible): owner, area, on the shelf, in stock.
        row_all = await self.query_spu_codes()
        if not row_all:
            return await self.finish_response(0, 'Success')
        # After deduplication, randomly pick three goods; if fewer than three
        # are available, take them all (order unspecified).
        lucky_spu_list = await self.select_lucky_spu_codes(row_all)
        return await self.finish_response(0, 'Success', lucky_spu_list)

    @run_on_executor
    def query_spu_codes(self):
        owner_code = self.get_query_argument('ownerCode')
        area_code = self.get_query_argument('areaCode')
        self.__logger.info('input: {}'.format({
            'ownerCode': owner_code,
            'areaCode': area_code
        }))
        mysql_conf = self.__conf['mysql_2']
        conn = connect(host=mysql_conf['ip'],
                       port=int(mysql_conf['port']),
                       user=mysql_conf['user'],
                       password=mysql_conf['password'],
                       database=mysql_conf['db'])
        select_sql = '''
            SELECT order_info.spu_code
            FROM cb_owner_buy_goods_info AS order_info
            INNER JOIN cb_goods_spu_for_filter AS spu_filter
                ON order_info.spu_code = spu_filter.spu_code
            WHERE spu_filter.goods_status = 1
                AND spu_filter.store_status = 1
                AND order_info.owner_code = %s
                AND order_info.rec_area_code = %s
        '''
        select_params = [owner_code, area_code]
        with conn.cursor() as cur:
            count = cur.execute(select_sql, select_params)
            if not count:
                return
            row_all = cur.fetchall()
        conn.close()
        return row_all

    @run_on_executor
    def select_lucky_spu_codes(self, row_all):
        all_spu_set = set([spu_tuple[0] for spu_tuple in row_all])
        self.__logger.info('found {} distinct goods'.format(len(all_spu_set)))
        k = 3 if len(all_spu_set) >= 3 else len(all_spu_set)
        # random.sample() no longer accepts sets on Python 3.11+
        return random.sample(list(all_spu_set), k)

    def finish_response(self, res_code, msg, data=None):
        result_dict = {'resultCode': res_code, 'msg': msg, 'data': data}
        self.__logger.info('output: {}'.format(result_dict))
        self.set_header("Content-Type", "application/json; charset=UTF-8")
        return self.finish(json.dumps(result_dict, ensure_ascii=False))
if __name__ == "__main__": def run_socket_mode_app(): import asyncio from bolt_adapter.aiohttp import AsyncSocketModeHandler async def socket_mode_app(): app_token = os.environ.get("SLACK_APP_TOKEN") await AsyncSocketModeHandler(app, app_token).connect_async() await asyncio.sleep(float("inf")) asyncio.run(socket_mode_app()) from concurrent.futures.thread import ThreadPoolExecutor socket_mode_thread = ThreadPoolExecutor(1) socket_mode_thread.submit(run_socket_mode_app) app.start() # export SLACK_APP_TOKEN= # export SLACK_SIGNING_SECRET= # export SLACK_CLIENT_ID= # export SLACK_CLIENT_SECRET= # export SLACK_SCOPES= # pip install .[optional] # pip install slack_bolt # python integration_tests/samples/socket_mode/{this file name}.py