def __init__(self, stream: trio.abc.Stream):
    self._stream = stream
    self._write_cap = trio.CapacityLimiter(1)
    # blocks reading from stream and _read_buf at the same time
    self._read_cap = trio.CapacityLimiter(1)
    self._read_buf = bytearray()
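# Hedged sketch of a companion read helper for the wrapper above; the method
# name receive_exactly() and the chunk size are assumptions, not part of the
# original class. It shows how the read limiter serialises access so only one
# task touches the stream and _read_buf at a time, as the comment in __init__
# intends.
async def receive_exactly(self, num_bytes: int) -> bytes:
    async with self._read_cap:
        while len(self._read_buf) < num_bytes:
            chunk = await self._stream.receive_some(65536)
            if not chunk:
                raise EOFError("stream closed before enough bytes arrived")
            self._read_buf += chunk
        data = bytes(self._read_buf[:num_bytes])
        del self._read_buf[:num_bytes]
        return data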
def __init__(self, config, auth_provider=None, vault_dirs=None, nursery=None):
    self.auth_provider = auth_provider
    self.vaults = []  # type: List[Vault]
    self.vault_dirs = vault_dirs
    self.config = config
    self.nursery = nursery  # type: Nursery
    self.vault_controllers = {}  # type: Dict[str, VaultController]
    self.concurrency = int(self.config.app['concurrency'])

    # These enforce global limits on various bundle actions
    self.limiters = {
        'update': trio.CapacityLimiter(8),
        'stat': trio.CapacityLimiter(8),
        'upload': trio.CapacityLimiter(8),
        'download': trio.CapacityLimiter(8),
    }  # type: Dict[str, trio.CapacityLimiter]

    self.stats = {
        'uploads': 0,
        'downloads': 0,
    }

    # A map from Bundle -> Future that contains all bundles scheduled for a push
    self._scheduled_pushes = {}  # type: Dict[Bundle, Any]

    # A map from Bundle -> Task that contains all running pushes
    self._running_pushes = {}  # type: Dict[Bundle, Any]

    # A map from Bundle -> Exception that contains all failed pushes
    self._failed_pushes = {}  # type: Dict[Bundle, Any]

    # A map from folder -> Watchdog. Used by the daemon and the "watch" command.
    self._watchdogs = {}  # type: Dict[str, Any]

    # A map from folder -> Task. Used by the daemon to autopull vaults periodically.
    self._autopull_tasks = {}  # type: Dict[str, Any]

    store.init(config)

    self.flying_vaults = FlyingVaultManager(self)
    self.db_vaults = VaultManager(self)  # might be renamed to/merged with self.vaults
    self.revisions = RevisionManager(self)
    self.bundles = BundleManager(self)
    self.user_vault_keys = UserVaultKeyManager(self)
    self.vault_users = VaultUserManager(self)

    self.identity = Identity(
        os.path.join(self.config.config_dir, self.config.get('identity.private_key')),
        os.path.join(self.config.config_dir, self.config.get('identity.public_key')),
        self.config,
    )

    super(SyncryptApp, self).__init__()
async def main(self, agents):
    self.session = asks.Session(
        connections=CONCURRENT_REQUESTS,
        headers={"Authorization": f"Token {settings.CALLHUB_API_KEY}"},
        base_location=settings.CALLHUB_API_DOMAIN,
        endpoint="/v1/",
    )
    # CallHub limits us to 13 requests per second, so we keep a small safety margin.
    self.bucket = TrioTokenBucket(5, RATE_LIMIT)
    self.conn_capacity = trio.CapacityLimiter(CONCURRENT_REQUESTS)

    equipes = await self.recuperer_equipes()
    for a in agents:
        if a["team"] not in equipes:
            logger.info(f"Team {a['team']} does not exist.")
    agents = [a for a in agents if a["team"] in equipes]

    existants = await self.recuperer_agents_existants()
    a_ajouter = [a for a in agents if a["email"] not in existants]
    logger.info(
        f"{len(a_ajouter)} agents to add. Estimated time {self.temps_estime(len(a_ajouter))}."
    )

    async with trio.open_nursery() as nursery:
        for a in a_ajouter:
            await nursery.start(self.creer_agent, a)
async def async_filter(
    function: Callable[[T], Awaitable[T]],
    iterable: AsyncIterable[T],
    max_concurrent,
) -> AsyncIterator[AsyncIterable[T]]:
    send_result, receive_result = trio.open_memory_channel[T](0)
    limiter = trio.CapacityLimiter(max_concurrent)

    async def wrapper(prev_done: trio.Event, self_done: trio.Event, item: T) -> None:
        async with limiter:
            result = await function(item)
            await prev_done.wait()
            if result:
                await send_result.send(item)
            self_done.set()

    async def consume_input(nursery) -> None:
        prev_done = trio.Event()
        prev_done.set()
        async for item in iterable:
            self_done = trio.Event()
            nursery.start_soon(wrapper, prev_done, self_done, item)
            prev_done = self_done
        await prev_done.wait()
        await send_result.aclose()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(consume_input, nursery)
        yield receive_result
        nursery.cancel_scope.cancel()
async def async_map_unordered(
    function: Callable[[T], Awaitable[U]],
    iterable: AsyncIterable[T],
    max_concurrent,
) -> AsyncIterator[AsyncIterable[U]]:
    send_result, receive_result = trio.open_memory_channel[U](0)  # pylint: disable=unsubscriptable-object
    limiter = trio.CapacityLimiter(max_concurrent)
    remaining_tasks: t.Set[int] = set()

    async def wrapper(task_id: int, item: T) -> None:
        async with limiter:  # pylint: disable=not-async-context-manager
            result = await function(item)
            await send_result.send(result)
            remaining_tasks.remove(task_id)

    async def consume_input(nursery) -> None:
        async for task_id, item in aenumerate(iterable):
            remaining_tasks.add(task_id)
            nursery.start_soon(wrapper, task_id, item)
        while remaining_tasks:
            await trio.sleep(0)
        await send_result.aclose()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(consume_input, nursery)
        yield receive_result
        nursery.cancel_scope.cancel()
async def async_map(
    function: Callable[[T], Awaitable[U]],
    iterable: AsyncIterable[T],
    max_concurrent,
) -> AsyncIterator[AsyncIterable[U]]:
    send_result, receive_result = trio.open_memory_channel[U](0)  # pylint: disable=unsubscriptable-object
    limiter = trio.CapacityLimiter(max_concurrent)

    async def wrapper(prev_done: trio.Event, self_done: trio.Event, item: T) -> None:
        async with limiter:  # pylint: disable=not-async-context-manager
            result = await function(item)
            await prev_done.wait()
            await send_result.send(result)
            self_done.set()

    async def consume_input(nursery) -> None:
        prev_done = trio.Event()
        prev_done.set()
        async for item in iterable:
            self_done = trio.Event()
            nursery.start_soon(wrapper, prev_done, self_done, item)
            prev_done = self_done
        await prev_done.wait()
        await send_result.aclose()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(consume_input, nursery)
        yield receive_result
        nursery.cancel_scope.cancel()
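# Hedged usage sketch for the async_map/async_filter helpers above; the
# double() coroutine and numbers() source are made up for illustration.
# Each helper is an async generator that yields a single receive channel,
# so you iterate the generator once and then drain the channel.
async def _demo_async_map():
    async def double(x: int) -> int:
        await trio.sleep(0.01)
        return x * 2

    async def numbers():
        for i in range(10):
            yield i

    async for results in async_map(double, numbers(), max_concurrent=4):
        async for value in results:
            print(value)  # results arrive in input order: 0, 2, 4, ...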
def __init__(self, tasks: list, max_threads: int = 10, max_retries: int = 0,
             pass_fail_job: bool = False, verbose: bool = True, threading: bool = True):
    """
    :tasks: list of Task or function items
    :max_threads: Maximum number of threads to run in parallel; max_threads=0 means no limit
    :max_retries: How many times to retry when a job fails
    :pass_fail_job: Do not raise an exception when a job fails
    :verbose: Verbose or not
    """
    assert isinstance(tasks, list) and all(
        callable(task) for task in tasks
    ), 'You have to pass a list of callable instances or mlchain.Task'
    self.tasks = tasks
    self.max_threads = max(0, max_threads)
    self.threading = threading
    if self.max_threads > 0:
        self.limiter = trio.CapacityLimiter(self.max_threads)
    else:
        self.limiter = None
    self.max_retries = max(max_retries + 1, 1)
    self.pass_fail_job = pass_fail_job
    self.verbose = verbose
    self.show_progress_bar = False
async def push_vault(self, vault):
    "Push a single vault"
    logger.info('Pushing %s', vault)

    try:
        self.identity.assert_initialized()

        await self.sync_vault(vault)
        limit = trio.CapacityLimiter(1)

        await self.set_vault_state(vault, VaultState.SYNCING)
        async with trio.open_nursery() as nursery:
            await vault.backend.open()
            await self.update_vault_metadata(vault)

            async for bundle in self.bundles.upload_bundles_for_vault(vault):
                async with limit:
                    await self.push_bundle(bundle)
                # nursery.start_soon(self.push_bundle, bundle)

        await self.set_vault_state(vault, VaultState.READY)
    except Exception:
        vault.logger.exception("Failure during vault push")
        await self.set_vault_state(vault, VaultState.FAILURE)
async def pull_vault(self, vault, full=False):
    with trio.fail_after(5 * 60):
        while vault.state == VaultState.SYNCING:
            await trio.sleep(0.5)

    vault.logger.info('Pulling %s', vault)

    # First, we will iterate through the changes, validate the chain and build up the state of
    # the vault (files, keys, ...). This is called "syncing".
    await self.sync_vault(vault, full=full)

    async with self.vault_controllers[vault.id].lock:
        await self.set_vault_state(vault, VaultState.SYNCING)

        # Then, we will do a change detection for the local folder and download every bundle
        # that has changed.
        # TODO: do a change detection (.vault/metadata store vs filesystem)
        limit = trio.CapacityLimiter(1)

        try:
            # here we should use trimeter to allow for parallel processing
            async with trio.open_nursery():
                async for bundle in self.bundles.download_bundles_for_vault(vault):
                    async with limit:
                        await self.pull_bundle(bundle)

            await self.set_vault_state(vault, VaultState.READY)
        except Exception:
            vault.logger.exception("Failure while pulling vault")
            await self.set_vault_state(vault, VaultState.FAILURE)

        await self.set_vault_state(vault, VaultState.READY)
async def bruteforce_subdomains(domain, iterator, nameservers):
    limit = trio.CapacityLimiter(defaults.DNS_LIMIT)
    start_time = time.time()

    source = list(init_source(iterator))
    results = []
    pbar = tqdm(total=len(source))

    while True:
        batch, source = source[:defaults.DNS_BATCH_SZ], source[defaults.DNS_BATCH_SZ:]
        if not batch:
            break
        async with trio.open_nursery() as nursery:
            for item in batch:
                subdomain = append_subdomain(domain, item)
                nursery.start_soon(
                    query_dns, subdomain, nameservers, results, limit, pbar
                )

    end_time = time.time()
    print(f"Total Time: {end_time - start_time}s")
    return results
async def _run(self, tasks: List[Task]) -> List[Any]:
    try:
        async with trio.open_nursery() as nursery:
            limit = None if self._jobs is None else trio.CapacityLimiter(self._jobs)
            self.before_tasks(tasks)
            self._results = {}
            index = 0
            try:
                for task in tasks:
                    if limit is not None:
                        await limit.acquire_on_behalf_of(task.name)
                    nursery.start_soon(self._run_task, index, task, limit, nursery)
                    index += 1
            except BaseException:
                nursery.cancel_scope.cancel()
                self.cancelled = True
                raise
        return values_sorted_by_key(self._results)
    except BaseException:
        self.cancelled = True
        raise
    finally:
        self.after_tasks(tasks)
async def download_retry(self, url: str, tries: int = 3) -> asks.response_objects.Response:
    host = url_get_hostname(url)
    limit = self.locks.setdefault(host, trio.CapacityLimiter(1))
    self.urls_waiting.setdefault(host, set()).add(url)
    logger.debug(f"waiting on limiter for host {host} "
                 f"({len(self.urls_waiting[host])} waiting)")
    try:
        async with limit:
            logger.debug(f"woke up for {url}")
            ld = self.last_download.get(host, 0.0)
            await trio.sleep_until(ld + self.download_delay)
            for i in range(tries):
                r = await asks.get(url)
                now = trio.current_time()
                self.last_download[host] = now
                try:
                    r.raise_for_status()
                except Exception as e:
                    if i >= tries - 1:
                        raise
                    logger.debug(f"got {e}, retrying (#{i+1})")
                    err_delay = self.download_delay * (2 ** (i + 2))
                    err_delay = min(err_delay, 30.0)
                    await trio.sleep(err_delay)
                else:
                    return r
            return r
    except BaseException:
        print(self.urls_waiting)
        raise
    finally:
        self.urls_waiting[host].remove(url)
async def get_consequences(limiter, variants):
    ''' asynchronously get variant consequences and symbols from Ensembl '''
    sem = trio.CapacityLimiter(50)
    async with trio.open_nursery() as nursery:
        for x in variants:
            nursery.start_soon(cq_and_symbol, limiter, x, sem)
    return variants
def __init__(self, limiter=None, thread_name_prefix=None, max_workers=None):
    self._running = True
    if limiter is None and max_workers is not None:
        limiter = trio.CapacityLimiter(max_workers)
    self._limiter = limiter
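# Hedged sketch of how such an executor-style wrapper might dispatch work; the
# run() method name is an assumption, not part of the original class. It shows
# the point of building the CapacityLimiter in __init__: trio.to_thread.run_sync
# accepts a limiter= keyword, so the limiter caps the number of concurrent
# worker threads (None falls back to trio's default thread limiter).
async def run(self, fn, *args):
    if not self._running:
        raise RuntimeError("executor has been shut down")
    return await trio.to_thread.run_sync(fn, *args, limiter=self._limiter)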
def __init__(self, *args, **kwargs):
    super(Spider, self).__init__()
    self.__results = {}
    self.__cover_info = []
    self.__root_path = "videos"
    self.__res_file_name = "res.json"
    self.__res_xlsx_name = "res.xlsx"
    self.__res_png_name = "res"
    self.__limits = trio.CapacityLimiter(conf.maxConnections * 10)
async def test_run_sync(shutdown_cache):
    trio_pid = os.getpid()
    limiter = trio.CapacityLimiter(1)

    child_pid = await run_sync(os.getpid, limiter=limiter)
    assert child_pid != trio_pid

    with pytest.raises(ValueError) as excinfo:
        await run_sync(_raise_pid, limiter=limiter)
    assert excinfo.value.args[0] != trio_pid
async def _async_get_all_clients(self, agent=None, deep=True, last_attempt=True):
    ret = []
    query = await self._async_status()
    if not query or "clients" not in query:
        return ret

    async def __compute_client_data(client, queue, limit):
        async with limit:
            cli = {}
            cli["name"] = client["name"]
            cli["state"] = self._status_human_readable(client["run_status"])
            infos = client["backups"]
            if cli["state"] in ["running"]:
                cli["last"] = "now"
                cli["last_attempt"] = "now"
            elif not infos:
                cli["last"] = "never"
                cli["last_attempt"] = "never"
            else:
                convert = True
                infos = infos[0]
                server_version = await self._async_get_server_version()
                if server_version and server_version < BURP_STATUS_FORMAT_V2:
                    cli["last"] = infos["timestamp"]
                    convert = False
                # only do deep inspection when server >= BURP_STATUS_FORMAT_V2
                elif deep:
                    logs = await self._async_get_backup_logs(
                        infos["number"], client["name"])
                    cli["last"] = logs["start"]
                else:
                    cli["last"] = utc_to_local(infos["timestamp"])
                if last_attempt:
                    last_backup = await self._async_get_last_backup(client["name"])
                    if convert:
                        cli["last_attempt"] = utc_to_local(last_backup["timestamp"])
                    else:
                        cli["last_attempt"] = last_backup["timestamp"]
            queue.append(cli)

    clients = query["clients"]
    limiter = trio.CapacityLimiter(self.concurrency)
    async with trio.open_nursery() as nursery:
        for client in clients:
            nursery.start_soon(__compute_client_data, client, ret, limiter)

    return ret
async def worker(job_item, task_status):
    # Backpressure: hold limiter for entire task to avoid
    # spawning too many workers
    async with limiter:
        task_status.started()
        result = await trio_parallel.run_sync(
            sync_fn,
            *job_item,
            cancellable=cancellable,
            limiter=trio.CapacityLimiter(1),
        )
        await send_chan.send(result)
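# Hedged sketch of how the worker above is typically driven; the run_jobs name
# and the job_source iterable are assumptions, while worker, send_chan and
# limiter come from the enclosing scope. The backpressure works because
# nursery.start() does not return until worker has called task_status.started(),
# i.e. until it holds a limiter slot, so at most limiter.total_tokens workers
# exist at once.
async def run_jobs(job_source):
    async with send_chan:
        async with trio.open_nursery() as nursery:
            for job_item in job_source:
                await nursery.start(worker, job_item)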
async def _async_get_all_clients(self, agent=None, deep=True, last_attempt=True):
    ret = []
    query = await self._async_status()
    if not query or 'clients' not in query:
        return ret

    async def __compute_client_data(client, queue, limit):
        async with limit:
            cli = {}
            cli['name'] = client['name']
            cli['state'] = self._status_human_readable(client['run_status'])
            infos = client['backups']
            if cli['state'] in ['running']:
                cli['last'] = 'now'
                cli['last_attempt'] = 'now'
            elif not infos:
                cli['last'] = 'never'
                cli['last_attempt'] = 'never'
            else:
                convert = True
                infos = infos[0]
                if self.server_version and self.server_version < BURP_STATUS_FORMAT_V2:
                    cli['last'] = infos['timestamp']
                    convert = False
                # only do deep inspection when server >= BURP_STATUS_FORMAT_V2
                elif deep:
                    logs = await self._async_get_backup_logs(
                        infos['number'], client['name'])
                    cli['last'] = logs['start']
                else:
                    cli['last'] = utc_to_local(infos['timestamp'])
                if last_attempt:
                    last_backup = await self._async_get_last_backup(client['name'])
                    if convert:
                        cli['last_attempt'] = utc_to_local(last_backup['timestamp'])
                    else:
                        cli['last_attempt'] = last_backup['timestamp']
            queue.append(cli)

    clients = query['clients']
    limiter = trio.CapacityLimiter(self.concurrency)
    async with trio.open_nursery() as nursery:
        for client in clients:
            nursery.start_soon(__compute_client_data, client, ret, limiter)

    return ret
async def main():
    global task_queued
    print("Begin parent")
    with trio.move_on_after(20):
        async with trio.open_nursery() as nursery:
            lock = trio.CapacityLimiter(MAX_WORKERS)
            print("Begin nursery")
            await nursery.start(task, nursery, "1", lock)
            task_queued += 1
            print("Waiting for children")
    print("End parent")
def __init__(self):
    self.limit = trio.CapacityLimiter(config.conns * 5)
    self.address_details_url = (
        "http://restapi.amap.com/v3/geocode/regeo?key={}&s=rsv3&location={},{}"
    )
    self.image_pools = {}
    self.res_pools = {}
    self.event_path = f"{config.exif_result_path_name}/{moment.now().format('YYYY-MM-DD hh:mm:ss')}"
    info(f"Event path: {initPath(self.event_path)}")
    self.image_path = os.path.join(self.event_path, "images")
    if config.save_image:
        info(f"Image path: {initPath(self.image_path)}")
async def worker(job_item, task_status):
    # Backpressure: hold limiter for entire task to avoid spawning too many workers
    async with limiter:
        task_status.started()
        result = await sync_runner(sync_fn, job_item,
                                   cancellable=cancellable,
                                   limiter=trio.CapacityLimiter(1))
        if chunksize == 1:
            await send(result)
        else:
            for r in result:
                await send(r)
async def getUAs():
    global MAXNUMS
    """
    Crawl task scheduling
    """
    limit = trio.CapacityLimiter(LIMIT)
    while TASKS:
        MAXNUMS = len(list(TASKS))
        loger.info(colored(f'Current number of tasks: {MAXNUMS}', 'red'))
        await trio.sleep(1)
        async with trio.open_nursery() as nursery:
            for item in list(TASKS):
                nursery.start_soon(getUAsitem, item, limit)
async def resolve_domains(self):
    limit = trio.CapacityLimiter(defaults.DNS_LIMIT)
    results = []
    async with trio.open_nursery() as nursery:
        for domain, _ in self.iter_domains():
            nursery.start_soon(query_dns, domain, self.nameservers, results, limit, None)

    results = dict(results)
    for domain, domain_data in self.iter_domains():
        domain_data['ip_addresses'] = {
            ip: ip_payload(ip) for ip in results.get(domain, [])
        }
async def main():
    limit = trio.CapacityLimiter(4)
    async with trio.open_nursery() as nursery:
        for item in datas[1:]:
            itemdatas = {
                key: item[index]
                for index, key in enumerate(
                    ["Time", "Latitede", "Longitude", "Deep", "Level"])
            }
            itemdatas['Time'] = moment.date(itemdatas['Time']).format("YYYY-MM-DDThh:mm:ss")
            itemdatas['Deep'] = int(float(itemdatas['Deep']))
            itemdatas['Address'] = ['-']
            nursery.start_soon(save, itemdatas, limit)
async def fetch():
    limit = trio.CapacityLimiter(max_requests)

    async def async_fetch(url, headers, timeout, **kwargs):
        # Hold the limiter inside the task so at most max_requests requests are
        # in flight at once; acquiring it around start_soon would release it as
        # soon as the task was scheduled and would not limit anything.
        async with limit:
            async with httpx.AsyncClient() as client:
                chan[url] = await client.get(url, headers=default_headers,
                                             timeout=timeout, **kwargs)

    async with trio.open_nursery() as nursery:
        for url in urls:
            # start_soon only forwards positional arguments, so wrap the call in
            # functools.partial to pass the keyword arguments through.
            nursery.start_soon(functools.partial(
                async_fetch, url, headers, timeout, **kwargs))
async def _async_get_all_backup_logs(self, client, forward=False, deep=False):
    ret = []
    backups = await self._async_get_client(client)
    queue = []
    limit = trio.CapacityLimiter(self.concurrency)
    async with trio.open_nursery() as nursery:
        for back in backups:
            nursery.start_soon(self._async_get_backup_logs, back['number'],
                               client, forward, deep, queue, limit)

    ret = sorted(queue, key=lambda x: x['number'])
    return ret
async def async_reduce(
    function: Callable[[T, U], Awaitable[U]],
    iterable: AsyncIterable[T],
    max_concurrent,
    initializer=SENTINEL,
) -> AsyncIterator[AsyncIterable[U]]:
    send_result, receive_result = trio.open_memory_channel[U](0)  # pylint: disable=unsubscriptable-object
    limiter = trio.CapacityLimiter(max_concurrent)
    collected_result = initializer

    async def wrapper(prev_done: trio.Event, self_done: trio.Event, item: T) -> None:
        nonlocal collected_result
        input_item = await wait_for(item)
        if collected_result is SENTINEL:
            # We are working on the first item, and initializer was not set.
            collected_result = input_item
        else:
            async with limiter:  # pylint: disable=not-async-context-manager
                collected_result = await function(collected_result, input_item)
        await prev_done.wait()
        self_done.set()

    async def consume_input(nursery) -> None:
        prev_done = trio.Event()
        prev_done.set()
        async for item in iterable:
            self_done = trio.Event()
            nursery.start_soon(wrapper, prev_done, self_done, item)
            prev_done = self_done
        await prev_done.wait()
        await send_result.send(collected_result)
        await send_result.aclose()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(consume_input, nursery)
        yield receive_result
        nursery.cancel_scope.cancel()
async def main():
    _, root_folder = sys.argv

    oauth_consumer_key = os.environ['SMUGMUG_API_KEY']
    oauth_consumer_secret = os.environ['SMUGMUG_API_SECRET']
    oauth_token = os.environ['SMUGMUG_OAUTH_ACCESS_TOKEN']
    oauth_token_secret = os.environ['SMUGMUG_OAUTH_TOKEN_SECRET']
    api = SmugMugApi(oauth_consumer_key, oauth_consumer_secret, oauth_token, oauth_token_secret)

    authuser_response = await api.get_authuser()
    folder_node_endpoint = authuser_response['Response']['User']['Uris']['Node']

    while root_folder:
        if not root_folder.endswith('/'):
            root_folder += '/'
        next_part, root_folder = root_folder.split('/', 1)
        async for node in list_nodes(api, folder_node_endpoint):
            if node['Name'] == next_part:
                folder_node_endpoint = node['Uri']
                break
        else:
            raise Exception('No folder', next_part)

    async for node in list_nodes(api, folder_node_endpoint):
        if node['Type'] != 'Album':
            continue
        albumkey = node['Uris']['Album'].split('/')[-1]
        album_endpoint = '/api/v2/album/' + albumkey
        print(f'Resetting album {album_endpoint}')
        await api._request_json(
            'PATCH', album_endpoint,
            headers={'Content-Type': 'application/json'},
            json={'Privacy': 'Unlisted',
                  'Password': os.environ['ALBUM_PASSWORD'],
                  'Keywords': 'smog.upload'})

        # uncomment to skip retagging images
        # continue

        next_page = album_endpoint
        while next_page:
            album_response = await api.list_images(next_page)
            async with trio.open_nursery() as nursery:
                limit = trio.CapacityLimiter(8)
                for image in album_response['Response'].get('AlbumImage', []):
                    nursery.start_soon(reset_image_keywords, limit, api, image['Uri'])
            next_page = album_response['Response']['Pages'].get('NextPage')
async def _async_get_all_backup_logs(self, client, forward=False):
    ret = []
    backups = await self._async_get_client(client)
    # queue = trio.Queue(len(backups))
    queue = []
    limit = trio.CapacityLimiter(self.concurrency)
    async with trio.open_nursery() as nursery:
        for back in backups:
            nursery.start_soon(self._async_get_backup_logs, back['number'],
                               client, forward, queue, limit)

    # while not queue.empty():
    #     tmp = await queue.get()
    #     ret.append(tmp)
    # ret = sorted(ret, key=lambda x: x['number'])
    ret = sorted(queue, key=lambda x: x['number'])
    return ret