async def main():
    """Benchmark QueueClient: create a queue, then time bulk put/get/delete."""
    queue = QueueClient(STORAGE_ACCOUNT, STORAGE_KEY)

    print("Queue Creation", end=" ")
    print((await queue.createQueue('aiotest')).status)

    print("\nInsertion:")
    pending = [Task(queue.putMessage('aiotest', 'hello world'))
               for _ in range(OPERATION_COUNT)]
    started = time()
    await gather(*pending)
    print("{} operations/s".format(OPERATION_COUNT/(time()-started)))

    print("Retrieval:")
    pop_receipts = []
    started = time()
    # Messages come back at most 32 at a time, so page through the queue.
    for _ in range(int(OPERATION_COUNT/32)+1):
        async for msg in queue.getMessages('aiotest', numofmessages=32):
            pop_receipts.append((msg['MessageId'], msg['PopReceipt']))
    print("{} operations/s".format(OPERATION_COUNT/(time()-started)))

    print("Deletion:")
    pending = [Task(queue.deleteMessage('aiotest', *receipt))
               for receipt in pop_receipts]
    started = time()
    await gather(*pending)
    print("{} operations/s".format(OPERATION_COUNT/(time()-started)))
    print()
    await queue.close()
def _send_then_recv(self, send, recv):
    """Send a RADIUS request and wait for the matching response, retrying.

    Tries up to ``self.max_tries`` times: each attempt sends via *send*,
    then waits up to ``self.timeout`` for the receive future.  The receive
    future is wrapped in ``shield()`` so a timeout does not cancel it; it is
    only recreated after a malformed packet or a connection error.

    Returns the decoded response, or None if every attempt timed out.
    """
    fut_recv = Task(recv())
    result = None
    for i in range(self.max_tries):
        try:
            yield from send()
        except ConnectionError as e:
            # FIX: logging.warn is a deprecated alias; use warning() like the
            # rest of this function does.
            logging.warning("Failed to send RADIUS request: %s" % e)
            yield from sleep(TIMEOUT, loop=self.loop)
            continue
        try:
            result = yield from wait_for(shield(fut_recv), self.timeout)
            break
        except TimeoutError:
            # No need to restart task, since it is protected by shield().
            logging.warning("Timeout, re-send RADIUS request.")
        except ValueError as e:
            logging.warning("Malformed RADIUS packet received: %s" % e)
            logging.info("Please check the shared secret.")
            # NOTE(review): retries recreate the future via
            # self._recv_response() rather than the *recv* argument — confirm
            # these are the same callable at every call site.
            fut_recv = Task(self._recv_response())
        except ConnectionError as e:
            logging.warning("Failed to receive RADIUS response: %s" % e)
            yield from sleep(TIMEOUT, loop=self.loop)
            fut_recv = Task(self._recv_response())
    if result is None:
        logging.warning("Timeout. No valid RADIUS response.")
        # Cancel the still-pending receive so it does not leak.
        fut_recv.cancel()
    return result
def client_connected(self, client_reader, client_writer):
    """Register a newly connected client and start handling its requests.

    The reader wrapper (_ReaderWrapper) signals when the next HTTP request
    arrives on this connection, but we also have to stash the writer on it
    so responses can be sent back.
    """
    client_reader._writer = client_writer
    # Handle the connection in the background without awaiting it here.
    Task(self.handle(client_reader, client_writer))
def show(self, context):
    """Select and focus the nodes stored in the bookmark at self.selectionID.

    Looks up the bookmark in the first world datablock, switches the current
    space to the bookmarked node tree, selects exactly the bookmarked nodes,
    and schedules a view-focus task.  Errors are reported via traceback but
    never propagated (Blender operator convention).
    """
    print("SHOW:" + str(self.selectionID))
    if (self.selectionID != -1):
        space = context.space_data
        world = bpy.data.worlds[0]
        try:
            # select the nodes:
            bookmark = world.bookmarks[self.selectionID]
            tree = BPointer.getByID(bookmark.nodetreeID).get()
            treeName = tree.name
            spaceContext = SpaceContext.getCurrentSpaceContext(context)
            spaceContext.logicTreeBrowsing = True
            spaceContext.logicTreeName = treeName
            # Deselect everything first so only bookmarked nodes end up selected.
            for node in tree.nodes:
                node.select = False
            # FIX: renamed loop variable `id` -> `node_id`; it shadowed the builtin.
            for node_id in bookmark.nodeIDS:
                print("TRY TO GET BPOINTER:" + str(node_id.value))
                bpNode = BPointer.getByID(node_id.value)
                bpNode.get().select = True
                print("B:" + bpNode.get().name)
            # bpy.ops.node.view_selected()
            Task(
                focusNodetree(context.window, context.screen, context.area,
                              context.region, context.space_data))
        # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # catch Exception instead (traceback is still printed, as before).
        except Exception:
            print(traceback.format_exc())
    return {'FINISHED'}
def __init__(self, host: str = "localhost", port: int = 9090, listen: int = 1000):
    """Server constructor: bind a non-blocking listening socket and run the loop.

    :param host: interface to bind to
    :param port: TCP port to listen on
    :param listen: backlog size passed to ``listen()``
    """
    self.loop = get_event_loop()
    self.loger = Loger(host, port)
    srv = socket()
    srv.setblocking(False)
    srv.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(listen)
    self._server_sock = srv
    self._users = []
    # Schedule the accept loop, then block in run_forever until Ctrl-C.
    Task(self._server())
    self.loger.server_is_running()
    try:
        self.loop.run_forever()
    except KeyboardInterrupt:
        self.loger.server_is_stopped()
def typed_asyncoro(*args, **kwargs):
    # Wrapper that drives the decorated coroutine function `func` (from the
    # enclosing scope) while tracking the runtime's program-counter level.
    runtime._pc_level += 1
    coro = func(*args, **kwargs)
    if rettype:
        # Return type was declared on the decorator itself.
        decl = returnType(rettype, wrap=False)
    else:
        try:
            # Otherwise the coroutine's first yield is expected to declare it.
            decl = coro.send(None)
        except StopIteration as exc:
            # Coroutine finished without yielding: return its value directly.
            runtime._pc_level -= 1
            return exc.value
        except Exception:
            runtime._pc_level -= 1
            raise
    if runtime.options.no_async:
        # Synchronous mode: drive the coroutine to completion inline and
        # reconcile the declared placeholder with the final value.
        while True:
            try:
                coro.send(None)
            except StopIteration as exc:
                runtime._pc_level -= 1
                if decl is not None:
                    __reconcile(decl, exc.value)
                return decl
            except Exception:
                runtime._pc_level -= 1
                raise
    if pc:
        # Preserve the program counter across suspension points.
        coro = _wrap_in_coro(_ProgramCounterWrapper(runtime, coro))
    # Async mode: schedule on the runtime loop; the placeholder `decl` is
    # reconciled with the task's result when it completes.
    task = Task(coro, loop=runtime._loop)
    task.add_done_callback(lambda t: _reconcile(decl, t))
    return _ncopy(decl)
async def test_cancel_loading_cancels_existing_loading_task(self):
    """cancel_loading_task() must cancel a previously running loading task."""
    preloader = Preloader()
    mock_coroutine_function = AsyncMock()
    mock_old_running_loading_task = Task(mock_coroutine_function())
    preloader.running_loading_task = mock_old_running_loading_task

    await preloader.cancel_loading_task()

    # FIX: Task.cancelled is a method — the bare attribute reference was
    # always truthy, so the original assertion could never fail.
    assert mock_old_running_loading_task.cancelled()
def _accept_client(self, client_reader, client_writer):
    """manage new client connections"""
    # Run the per-client handler in the background and remember the stream
    # pair so the connection can be tracked while it is alive.
    handler_task = Task(self._handle_client(client_reader, client_writer))
    self.clients[handler_task] = (client_reader, client_writer)

    def _forget(done_task):
        # Drop the bookkeeping entry once the handler finishes.
        del self.clients[done_task]

    handler_task.add_done_callback(_forget)
def __init__(self, loop, port):
    """Open a non-blocking listening socket on *port* and start the accept loop."""
    self.loop = loop
    serv = socket()
    serv.setblocking(0)
    serv.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    serv.bind(('', port))
    serv.listen(5)
    self._serv_sock = serv
    self._peers = []
    # Schedule the accept loop on the event loop.
    Task(self._server())
def __init__(self, url, dbname, *, loop=None):
    """Set up the pusher: HTTP session, bounded send queue, size-threshold
    events, and the background push-loop task.

    :param url: InfluxDB endpoint URL
    :param dbname: target database name
    :param loop: optional event loop to bind the queue and task to
    """
    super().__init__()
    self.url = url
    self.dbname = dbname
    self._loop = loop
    self.logger = logging.getLogger("influxdbpusher")
    self._http_session = aiohttp.ClientSession()
    self._queue = Queue(SEND_QUEUE_MAX_SIZE, loop=loop)
    self._min_size_reached = Event()
    self._max_size_reached = Event()
    # Background task that drains the queue and pushes data to InfluxDB.
    self._push_task = Task(self._push_data_loop(), loop=loop)
async def get_items(cls, req, session):
    # Resolve a placement and return its recommendation slots (404 when the
    # placement is unknown or every slot came back empty).
    placement = await cls._get_placement(req, session)
    if placement is None:
        return cls._build_response(404)
    # Query-string handling: pop the control parameters, everything that
    # remains in req.query is forwarded as external variables.
    explict_fallbacks = req.query.pop('explict_fallbacks', False)
    input_external_variables = req.query
    show_details = req.query.pop('show_details', placement.get('show_details'))
    distribute_items = placement.get('distribute_items')
    # NOTE: `recos` and `slots` start as the SAME list object; unless
    # distribute_items rebinds `recos` below, they stay aliased.
    recos = slots = []
    recos_key = 'slots'
    # Fetch every slot of the first variation concurrently.
    slots_coros = []
    for slot in placement['variations'][0]['slots']:
        coro = cls._get_slot_recos_async(slot, input_external_variables,
                                         session, show_details)
        slots_coros.append(Task(coro))
    for coro in slots_coros:
        slots.append(await coro)
    # A slot is valid when it has main recommendations or any non-empty
    # fallback list.
    valid_slots = []
    for slot in slots:
        slot_recos = slot['items']
        has_fallback = [
            True for fallback in slot_recos['fallbacks'] if fallback
        ]
        if slot_recos['main'] or has_fallback:
            valid_slots.append(slot)
    if not valid_slots:
        return cls._build_response(404)
    # Unless the caller asked for explicit fallbacks, flatten main+fallbacks
    # into a single item list per slot (redirects always flatten).
    if not explict_fallbacks or placement['is_redirect']:
        for slot in slots:
            slot['items'] = cls._get_all_slot_recos(slot['items'])
    if distribute_items:
        recos_key = 'distributed_items'
        recos = cls._get_all_recos_from_slots(slots)
        recos = cls._distribute_items(recos)
    if placement['is_redirect']:
        return cls._build_redirect_response(recos, distribute_items, req)
    else:
        # Shrink the payload to the fields the response actually needs.
        placement = {
            'name': placement['name'],
            'small_hash': placement['small_hash']
        }
    placement[recos_key] = recos
    return cls._build_recos_response(placement)
def getCurrentSpaceContext(context):
    """Look up the space-context entry for the current space signature.

    Returns the mapping entry when one exists; otherwise schedules creation
    of a new space property and returns None.
    """
    signature = getCurrentSpaceSignature(context)
    world = bpy.data.worlds[0]
    pos = world.spaceMapping.find(signature)
    if pos == -1:
        # No entry yet: create one asynchronously and report "not found".
        Task(createSpaceProperty(signature))
        return None
    return world.spaceMapping[pos]
def recycle(self, request, response):
    """Reuse this reader/writer pair for the next keep-alive request.

    The current request body must be fully consumed before the connection
    can serve another request.
    """
    log.debug('recycling')
    if not request.body_consumed:
        log.debug('consuming body')
        yield from request.body()
        log.debug('done')
    # Hand the streams back to the handler, flagging this as a recycled
    # (keep-alive) connection.
    Task(self.handle(request.reader, response.writer, True))
def __call__(self, loop, coro):
    """Task factory: create a task and inherit context from the current task.

    Both ``_context`` and ``_context_stack`` are copied from the currently
    running task when present; a missing attribute (or no current task at
    all) simply means there is nothing to inherit.
    """
    parent = Task.current_task(loop=loop)
    child = Task(coro, loop=loop)
    for attr in ('_context', '_context_stack'):
        try:
            setattr(child, attr, getattr(parent, attr).copy())
        except AttributeError:
            pass
    return child
def _schedule_task(self, coro: Coroutine[Any, Any, Any]):
    """
    Wrap *coro* in a Task tracked by this object.

    Every task in the scope of this object is scheduled through this method
    so that ``wait_closed`` can await them at exit.  A done-callback removes
    the finished task from ``_tasks`` again.

    :param coro: coroutine to wrap into a task
    :return: None
    """
    task: Task = Task(coro)
    # Looked up on self at callback time, so a rebound _tasks set is honored.
    task.add_done_callback(lambda finished: self._tasks.discard(finished))
    self._tasks.add(task)
def _schedule_task(self, coro: Coroutine[Any, Any, Any]) -> Task:
    """
    Wrap *coro* in a Task on the running loop and track it in ``self._tasks``.

    Tracked tasks are awaited at exit; ``self._task_finalizer`` runs when
    each task completes.

    :param coro: coroutine to wrap into a task
    :return: the scheduled Task
    """
    scheduled: Task = Task(coro)
    scheduled.add_done_callback(self._task_finalizer)
    self._tasks.add(scheduled)
    return scheduled
def _main():
    """Stand-alone entry point: enable verbose logging, then run _watcher."""
    import logging
    import asyncio
    from asyncio import Task

    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(logging.StreamHandler())

    loop = asyncio.get_event_loop()
    loop.run_until_complete(Task(_watcher(loop)))
def __init__(self, sock_server: 'Server', sock: socket, address: tuple):
    """Peer-socket constructor.

    :param sock_server: owning Server instance (supplies the event loop)
    :param sock: accepted client socket
    :param address: remote peer address
    """
    self.loop = sock_server.loop
    self.address = address
    self._sock = sock
    self._server = sock_server
    self.entered = False
    # Start handling this peer in the background.
    Task(self._peer_handler())
def bots_longpoll_run(self, custom_process=False):
    """Launch the bots-longpoll processor.

    With ``custom_process=True`` the scheduled task is returned to the
    caller; otherwise this call blocks on it until interrupted or cancelled.
    """
    processor = self.add_task(Task(self.bots_longpoll_processor()))
    if custom_process:
        return processor
    self.logger.info("Started to process messages")
    try:
        self.loop.run_until_complete(processor)
    except (KeyboardInterrupt, SystemExit):
        # Graceful shutdown on Ctrl-C / interpreter exit.
        self.loop.run_until_complete(self.stop())
    except asyncio.CancelledError:
        pass
async def __process(self):
    """Execute the queued request against the LXD API.

    A 'sync' result is returned directly.  An 'async' result spawns the
    operation's side jobs, polls the operation's wait endpoint until it
    finishes, and returns the final metadata.
    """
    result = await self._client.query(self._method, self._url, self._data)
    kind = result['type']
    if kind == 'sync':
        return result['metadata']
    if kind == 'async':
        metadata = result['metadata']
        side_tasks = [Task(job) for job in self._get_jobs(metadata)]
        self._id = metadata['id']
        wait_url = '/1.0/operations/{id}/wait'.format(id=self._id)
        result = await self._client.query('get', wait_url)
        await wait(side_tasks)
        return result['metadata']
def task_done_callback(finished_task: Task) -> None:
    """Done-callback for a rate-limited task.

    Releases the limiting semaphore, invokes the result callback with the
    finished task and its iteration value, then signals completion — either
    directly, or after awaiting a coroutine the callback returned.
    """
    limiting_semaphore.release()
    response: Optional[Any] = None
    try:
        response = result_callback(
            finished_task, passed_iteration_value_context_var.get())
    # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception instead (still logged, never propagated).
    except Exception:
        LOG.exception(f'Unexpected exception in result callback.')
    finally:
        if iscoroutine(response):
            # The callback returned a coroutine: run it, signal when done.
            Task(coro=response).add_done_callback(signal_callback_finished)
            return
        signal_callback_finished()
async def _try_state(self, state, objs, timeout=None):
    # Drive every non-disabled object toward *state* ("open" for active,
    # "close" otherwise), logging per-object failures and timeouts.
    objs = [obj for obj in objs if obj.state != OpenState.disabled]
    if not objs:
        return
    action = "open" if state == OpenState.active else "close"
    getter = attrgetter(action)
    # Map each task back to its object so failures can be attributed.
    tasks = {Task(getter(obj)()): obj for obj in objs}
    done, pending = await wait(tasks.keys(), timeout=timeout)
    for task in done:
        exc = task.exception()
        if exc:
            obj = tasks[task]
            log.error("Failed to %s %r", action, obj.name, exc_info=exc)
    for task in pending:
        # Still running when the timeout expired.
        obj = tasks[task]
        log.warning("Failed to %s %r after %s seconds", action, obj.name, timeout)
def _inherit_queue(self, coro):
    """Create a new task inheriting the partition assigned to the current
    task.

    If there is no current task, or the current task does not have any
    queue, the new task will be assigned to the root partition.

    Return a task object.
    """
    task = Task(coro, loop=self)
    task.partition = _find_partition(self)
    if task._source_traceback:
        # Drop the frame pointing at this factory so tracebacks show the
        # caller instead.
        del task._source_traceback[-1]
    self._partitions[task.partition].add_task(task)
    # NOTE(review): add_done_callback is given a second positional argument
    # here; stock asyncio only accepts a keyword-only `context` — presumably
    # this custom loop overrides it. Verify.
    task.add_done_callback(self._partitions[task.partition].remove_task, task)
    return task
def spawn(self, coro, partition=None):
    """Place and run a coro in a specific and isolated partition; if
    *partition* is not given a new one will be created.

    Return a task object bound to the coro.
    """
    task = Task(coro, loop=self)
    # When no partition key is supplied, the task itself keys a brand-new
    # partition.
    partition = partition if partition else task
    task.partition = partition
    if task._source_traceback:
        # Drop the frame pointing at this factory so tracebacks show the
        # caller instead.
        del task._source_traceback[-1]
    try:
        self._partitions[partition].tasks.add(task)
    except KeyError:
        # First task for this partition: create its bookkeeping entry.
        self._partitions[partition] = _Partition()
        self._partitions[partition].tasks.add(task)
    # Mark the partition as having runnable work.
    self._p_to_process.add(partition)
    return task
def consume_unused_data(self):
    # Try to turn buffered bytes in self.unused_data into complete messages,
    # scheduling message_received() for each one.
    if self.incoming_message is None and len(self.unused_data) >= 4:
        # The first 32-bit word is a packed header: version (4 bits),
        # message id (12 bits), message type (8 bits).
        toUnpack = XDRIntegerUnserializer().continueUnserialization(
            self.unused_data)
        messageVersion = (toUnpack >> 28) & 0xf
        messageId = (toUnpack >> 16) & 0xfff
        messageType = (toUnpack >> 8) & 0xff
        self.incoming_message = self.message_factory(
            messageVersion, messageType, messageId)
    if self.incoming_message is not None:
        # Feed the buffered bytes to the in-progress message.
        candidate = self.incoming_message.continueUnserialization(
            self.unused_data)
        if candidate is not None:
            # Unserialization is complete
            self.incoming_message = None
            result = []
            result.append(candidate)
            # Recurse to drain any further complete messages from the buffer.
            # NOTE(review): this method has no return statement, so this
            # recursive call always yields None and the append below is dead
            # — each recursion level schedules its own tasks instead. Verify
            # whether a `return` was lost.
            candidate = self.consume_unused_data()
            if candidate is not None:
                result.append(candidate)
            tasks = [
                Task(self.message_received(msg), loop=self.loop)
                for msg in result
            ]
async def main():
    # End-to-end benchmark for TableClient against an Azure table 'aiotest':
    # create/query the table, then time insert, delete, upsert, query and a
    # batch update of OPERATION_COUNT entities.
    t = TableClient(STORAGE_ACCOUNT, STORAGE_KEY)
    #print("Table Deletion", end=" ")
    #print((await t.deleteTable('aiotest')).status)
    print("Table Creation", end=" ")
    print((await t.createTable('aiotest')).status)
    print("Table Query", end=" ")
    async for item in t.getTables({"$filter": "TableName eq 'aiotest'"}):
        print(item['TableName'], end=" ")
    print("\nInsertion:", end=" ")
    # Fire all inserts concurrently, then time the gather.
    tasks = []
    for i in range(OPERATION_COUNT):
        tasks.append(
            Task(
                t.insertEntity(
                    'aiotest', {
                        "Address": "Mountain View",
                        "Age": 23 + i,
                        "AmountDue": 200.23,
                        "CustomerCode": str(
                            uuid1()),  # send this as string intentionally
                        "CustomerSince": datetime.now(),
                        "IsActive": True,
                        "NumberOfOrders": 255,
                        "PartitionKey": "mypartitionkey",
                        "RowKey": "Customer%d" % i
                    })))
    start = time()
    res = await gather(*tasks)
    print("{} operations/s".format(OPERATION_COUNT / (time() - start)))
    #print([r.status for r in res])
    print("Deletion:")
    tasks = []
    for i in range(OPERATION_COUNT):
        tasks.append(
            Task(
                t.deleteEntity('aiotest', {
                    "PartitionKey": "mypartitionkey",
                    "RowKey": "Customer%d" % i
                })))
    start = time()
    res = await gather(*tasks)
    print("{} operations/s".format(OPERATION_COUNT / (time() - start)))
    #print([r.status for r in res])
    print("Upsert:")
    tasks = []
    for i in range(OPERATION_COUNT):
        tasks.append(
            Task(
                t.insertOrReplaceEntity(
                    'aiotest', {
                        "Address": "Mountain View",
                        "Age": 23 - i,
                        "AmountDue": 0,
                        "CustomerCode":
                        uuid1(),  # this updates the entry schema as well
                        "CustomerSince": datetime.now(),
                        "IsActive": True,
                        "NumberOfOrders": 0,
                        "PartitionKey": "mypartitionkey",
                        "RowKey": "Customer%d" % i
                    })))
    start = time()
    res = await gather(*tasks)
    print("{} operations/s".format(OPERATION_COUNT / (time() - start)))
    #print([r.status for r in res])
    print("Query")
    async for item in t.queryEntities('aiotest', {"$filter": "Age gt 0"}):
        print(item['RowKey'], end=" ")
    print()
    # Batch update: build the payload locally, then submit in one request.
    entities = []
    for i in range(OPERATION_COUNT):
        entities.append({
            "Address": "Mountain View",
            "Age": 23 + i,
            "AmountDue": 200.23,
            "CustomerCode": str(uuid1()),  # send this as string intentionally
            # NOTE(review): the key below looks redacted in this copy of the
            # source (an OData type annotation such as
            # "CustomerSince@odata.type" would fit); confirm against upstream.
            "*****@*****.**": "Edm.DateTime",
            "CustomerSince": datetime.now(),
            "IsActive": True,
            "NumberOfOrders": 255,
            "PartitionKey": "mypartitionkey",
            "RowKey": "Customer%d" % i
        })
    start = time()
    res = await t.batchUpdate('aiotest', entities)
    print("{} operations/s".format(OPERATION_COUNT / (time() - start)))
    print(res.status)
    print(res.headers)
    print(await res.text())
    print()
    await t.close()
def start(self, loop):
    """Start the application, using the asyncio event loop passed as
    parameter.

    This means connecting to known peers, and then starting a server.
    """
    # Kick off a BEP client connection to the (currently hard-coded) peer.
    client = BEPClient(self, loop)
    Task(client.connect(("127.0.0.1", 22000)))
def wrap(self, coro_ref):
    """Run *coro_ref* through the coroutine executor and schedule it as a Task."""
    executed = self._coro_executor(coro_ref)
    return Task(executed)
def __init__(self, server, sock, name):
    """Peer constructor: remember server/loop/socket/name, start the handler."""
    self.loop = server.loop
    self._server = server
    self._sock = sock
    self.name = name
    # Handle this peer's traffic in the background.
    Task(self._peer_handler())
# NOTE(review): this chunk appears to be the interior of an enclosing
# generator-based coroutine (there is a bare ``yield from`` below) whose
# ``def`` line is not visible here — confirm against the full file.

def handle(request):
    # Echo the submitted text back into the form and forward it to prompt().
    user_input = request.GET.get("user_input", "")
    prompt(prompt_text + "\n" + user_input)
    html = """<html><title>Blender Async Demo</title><body> <h2>User input:</h2> <form method="GET"> <textarea name="user_input">{0}</textarea> <br> <button type="submit">Submit</button> </form> </body></html>""".format(user_input)
    return web.Response(body=html.encode('utf-8'))

@coroutine
def init(loop):
    # Build the aiohttp application and serve it on 127.0.0.1:9090.
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', handle)
    srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 9090)
    # NOTE(review): ``user_input`` is not defined in this scope — this line
    # would raise NameError when init() runs; looks like a copy/paste slip.
    prompt(prompt_text + "\n" + user_input)
    return srv

yield from init(asyncio.get_event_loop())

if __name__ == "__main__":
    asyncio_bridge.register()
    bpy.ops.bpy.start_asyncio_bridge()
    Task(test_1())