Example #1
 async def recv(self):
     if not self.closed:
         async with TaskGroup(wait=any) as g:
             receiver = await g.spawn(self.incoming.get)
             await g.spawn(self.closing.wait)
         if g.completed is receiver:
             return receiver.result
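A minimal, self-contained variant of the same wait=any race (names here are hypothetical, not the class above): the first task to finish becomes g.completed and the loser is cancelled when the block exits.

import curio
from curio import TaskGroup, Queue, Event

async def recv_or_close(incoming, closing):
    # Race a queue read against a shutdown event; wait=any cancels the loser.
    async with TaskGroup(wait=any) as g:
        receiver = await g.spawn(incoming.get)
        await g.spawn(closing.wait)
    if g.completed is receiver:
        return receiver.result
    return None

async def demo():
    incoming, closing = Queue(), Event()
    await incoming.put('hello')
    print(await recv_or_close(incoming, closing))   # -> hello

if __name__ == '__main__':
    curio.run(demo)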
Example #2
    async def send(uri):
        c = heh[0]

        async def sendit():
            await c.send(uri)
            resp = await c.recv()
            _uri = uri.split(' ', 1)[-1]
            msg = f'not :: {_uri}' if resp else f'run :: {_uri}'
            clog.debug(msg)
            return resp

        try:
            async with TaskGroup(name='client send', wait=any) as send_or_exit:
                exit_task = await send_or_exit.spawn(exit, send_or_exit)
                send_task = await send_or_exit.spawn(sendit)

            try:
                resp = send_task.result
                return resp
            except TaskCancelled:
                return
            except RuntimeError as e:  # FIXME not quite right?
                clog.error(e)  # not sure what is causing this ... maybe a connection error?

        except (EOFError, BrokenPipeError, TaskGroupError) as e:
            if isinstance(e, TaskGroupError):
                if EOFError not in e.errors:
                    raise e

            c = await auth()
            heh[0] = c
            return await send(uri)
Example #3
async def main(label_type, shop_id, module_id_range, tmn_sn_range,
               current_value_range, status_tuple, ip, port, send_interval,
               monitor_interval):
    print(
        "label_type: {}, shop_id: {}, module_id_min: {}, module_id_max: {}, tmn_sn_min: {}, tmn_sn_max: {}, current_value_min: {}, current_value_max: {}, status_tuple: {}, ip: {}, port: {}, send_interval: {}, monitor_interval: {}"
        .format(label_type, shop_id, module_id_range[0], module_id_range[1],
                tmn_sn_range[0], tmn_sn_range[1], current_value_range[0],
                current_value_range[1], status_tuple, ip, port, send_interval,
                monitor_interval))

    data_frame = CurrentDataFrame(data_type=label_type,
                                  shop_id=shop_id,
                                  module_id_min=module_id_range[0],
                                  module_id_max=module_id_range[1],
                                  current_value_min=current_value_range[0],
                                  current_value_max=current_value_range[1],
                                  tmn_sn_min=tmn_sn_range[0],
                                  tmn_sn_max=tmn_sn_range[1],
                                  status_tuple=status_tuple)

    tg = TaskGroup()
    counters = [0 for _ in range(0, len(data_frame.to_hex_str_list()))]
    print("tmn count:", len(counters))
    monitor_task = await spawn(monitor, counters, monitor_interval)
    for i, msg in enumerate(data_frame.to_hex_str_list()):
        await tg.spawn(task, i, msg, ip, port, send_interval, counters)
    await monitor_task.join()
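The monitor and task coroutines are defined elsewhere in that project; a plausible minimal monitor (an assumption, not the original code) would simply poll the shared counters list:

from curio import sleep

async def monitor(counters, interval):
    # Periodically report how many frames each sender task has pushed.
    while True:
        await sleep(interval)
        print('frames sent:', sum(counters), 'per terminal:', counters)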
Example #4
async def client_handler(client, addr):
    print(f'{addr[0]} connected')
    async with client:
        clientStream, output = client.as_stream(), Queue()
        async with TaskGroup(wait=any) as workers:
            await workers.spawn(outgoing, clientStream, output)
            await workers.spawn(incoming, clientStream, output)
    print(f'{addr[0]} disconnected')
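The incoming/outgoing workers are not shown; a minimal queue-based sketch of such a pair (hypothetical, simply echoes client lines back):

from curio import Queue

async def incoming(stream, output):
    # Read lines from the client and hand them to the outgoing worker.
    async for line in stream:
        await output.put(line)

async def outgoing(stream, output):
    # Write whatever the incoming worker queued back to the client.
    while True:
        line = await output.get()
        await stream.write(line)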
Example #5
File: main.py  Project: SV-Seeker/pi-rov
async def server(host, port):
    # Server task groups
    async with TaskGroup() as group:
        await group.spawn(feed.dispatcher)
        await group.spawn(out_feed.dispatcher)
        await group.spawn(tcp_server, host, port, connection_handler)

        for rov_task in rov_tasks:
            await group.spawn(rov_task, out_feed)
Example #6
        async def _parallele():

            async with TaskGroup(wait=all) as g:
                for child in _children:
                    await g.spawn(child)

            success = len(list(filter(bool, g.results)))

            return success >= _succes_threshold
Example #7
 async def relay(self, conn, remote_conn):
     t1 = await spawn(self._relay(conn, remote_conn))
     t2 = await spawn(self._relay(remote_conn, conn))
     try:
         async with TaskGroup([t1, t2]) as g:
             task = await g.next_done(cancel_remaining=True)
             await task.join()
     except CancelledError:
         pass
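_relay is not shown; a typical byte pump between two curio sockets could look like this (a sketch, not the original implementation):

async def _relay(self, src, dst):
    # Copy bytes from src to dst until the peer closes its end.
    while True:
        data = await src.recv(65536)
        if not data:
            break
        await dst.sendall(data)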
Example #8
async def main():
    async with TaskGroup() as g:
        await g.spawn(dispatcher)
        await g.spawn(subscriber, 'child1')
        await g.spawn(subscriber, 'child2')
        await g.spawn(subscriber, 'child3')
        ptask = await g.spawn(producer)
        await ptask.join()
        await g.cancel_remaining()  # cancel and remove all remaining non-daemonic tasks from the group
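dispatcher, subscriber, and producer are defined elsewhere; a minimal queue-based stand-in (hypothetical, for illustration only):

from curio import Queue, sleep

messages = Queue()       # producer -> dispatcher
subscribers = set()      # one Queue per subscriber

async def dispatcher():
    # Fan each message out to every registered subscriber queue.
    while True:
        msg = await messages.get()
        for q in subscribers:
            await q.put(msg)

async def subscriber(name):
    queue = Queue()
    subscribers.add(queue)
    while True:
        msg = await queue.get()
        print(name, 'got', msg)

async def producer():
    for i in range(10):
        await messages.put(i)
        await sleep(0.1)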
Example #9
async def main() -> None:
    names = (kw for kw in kwlist if len(kw) <= MAX_KEYWORD_LEN)
    domains = (f'{name}.dev'.lower() for name in names)
    async with TaskGroup() as group:  # <3>
        for domain in domains:
            await group.spawn(probe, domain)  # <4>
        async for task in group:  # <5>
            domain, found = task.result
            mark = '+' if found else ' '
            print(f'{mark} {domain}')
Example #10
 async def relay(self, remote_stream):
     t1 = await spawn(self._relay(self._stream, remote_stream))
     t2 = await spawn(self._relay2(remote_stream, self._stream))
     try:
         async with TaskGroup([t1, t2]) as g:
             task = await g.next_done()
             await task.join()
             await g.cancel_remaining()
     except CancelledError:
         pass
Example #11
    async def process_traces(*,
                             dataset_base: str = _CONFIG.path,
                             force: bool = False):
        mkdirs(join(dataset_base, 'traces'))

        _logger.info("Reading packet traces...")

        chunks = _CONFIG.extract.chunks
        sites_per_chunk = _CONFIG.extract.max_sites_per_store

        if not LiberatoreDataset.__check_store_presence(dataset_base) or force:
            max_worker_proc_old = curio.workers.MAX_WORKER_PROCESSES
            curio.workers.MAX_WORKER_PROCESSES = _CONFIG.extract.max_cpu_cores

            before = perf_counter()

            if _CONFIG.extract.max_cpu_cores > 1 and chunks > 1:
                async with TaskGroup(wait=all) as g:
                    for chunk_id in range(chunks):
                        await g.spawn(
                            run_in_process,
                            partial(
                                LiberatoreDataset._translate_site_trace_chunk,
                                worker=True,
                                queue=get_queue(),
                                log_level=getLogger().level),
                            chunk_id * sites_per_chunk,
                            (chunk_id + 1) * sites_per_chunk, chunk_id,
                            dataset_base)
            elif _CONFIG.extract.max_cpu_cores == 1:
                for chunk_id in range(chunks):
                    LiberatoreDataset._translate_site_trace_chunk(
                        chunk_id * sites_per_chunk,
                        (chunk_id + 1) * sites_per_chunk, chunk_id,
                        dataset_base)
            else:
                LiberatoreDataset._translate_site_trace_chunk(
                    0, 2000, 0, dataset_base)

            curio.workers.MAX_WORKER_PROCESSES = max_worker_proc_old

            if LiberatoreDataset.__check_store_presence(dataset_base):
                _logger.info(
                    "Trace processing completed in %s. Deleting PCAP logs...",
                    duration(seconds=perf_counter() - before).in_words())
                remove_recursive(join(dataset_base, 'pcap-logs'))

                _logger.info("Packet traces processing complete.")
            else:
                _logger.error("Trace processing failed after %s",
                              duration(seconds=perf_counter() - before))
        else:
            _logger.info(
                "Skipping trace processing as there are already processed traces."
            )
Example #12
async def incoming(clientStream, output):
    try:
        lines, received = Queue(), Queue()
        async with TaskGroup(wait=any) as workers:
            await workers.spawn(interpret, received)
            await workers.spawn(receive, lines, received)
            async for line in clientStream:
                await lines.put(line)
    except CancelledError:
        await clientStream.write(b'Server shutting down\n')
        raise
Example #13
File: main.py  Project: SV-Seeker/pi-rov
async def connection_handler(client, addr):
    logger.info('connection from %s', addr)
    async with client:
        client_stream = client.as_stream()
        async with TaskGroup(wait=any) as workers:
            # connect client stream to main feed
            await workers.spawn(out_feed.outgoing, client_stream)
            await workers.spawn(feed.incoming, client_stream)
            # TODO: incoming feed message parsing

        # May not need this
        await feed.publish(messages.EXIT)
    logger.info('connection lost %s', addr)
Example #14
async def main():
    urls = [ 
        'http://www.dabeaz.com/cgi-bin/saas.py?s=5',   # Sleep 5 seconds (cancelled)
        'http://www.dabeaz.com/cgi-bin/saas.py?s=10',  # Sleep 10 seconds (cancelled)
        'http://www.dabeaz.com/cgi-bin/fib.py?n=10',   # 10th Fibonacci number (succeeds)
        ] 

    results = []
    async with TaskGroup(wait=any) as g:
        for url in urls:
            await g.spawn(fetch_url, url)

    print(g.completed.result)
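fetch_url is not shown; one way to sketch it, assuming the requests library and curio.run_in_thread to keep the blocking call off the kernel loop:

import requests
from curio import run_in_thread

async def fetch_url(url):
    # Delegate the blocking HTTP request to a worker thread.
    resp = await run_in_thread(requests.get, url)
    return resp.text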
Example #15
async def chat_handler(client, addr):
    print('Connection from', addr)
    async with client:
        client_stream = client.as_stream()
        await client_stream.write(b'Your name: ')
        name = (await client_stream.readline()).strip()
        await publish((name, b'joined\n'))

        async with TaskGroup(wait=any) as workers:
            await workers.spawn(outgoing, client_stream)
            await workers.spawn(incoming, client_stream, name)

        await publish((name, b'has gone away\n'))

    print('Connection closed')
Example #16
File: chat.py  Project: luvjoey1996/curio
async def chat_handler(client, addr):
    log.info('Connection from %r', addr)
    local = {'address': addr}
    async with client:
        client_stream = client.as_stream()
        await client_stream.write(b'Your name: ')
        name = (await client_stream.readline()).strip()
        await publish((name, b'joined\n'), local)

        async with TaskGroup(wait=any) as workers:
            await workers.spawn(outgoing, client_stream)
            await workers.spawn(incoming, client_stream, name, local)

        await publish((name, b'has gone away\n'), local)

    log.info('%r connection closed', addr)
Example #17
    async def auth():
        ch = Channel(chan)

        async def connect(_ch=ch, authkey=encoded):
            connection = await _ch.connect(authkey=encoded)
            clog.debug(f'got connection {connection}')
            return connection

        async with TaskGroup(name='client auth', wait=any) as auth_or_exit:
            clog.info('waiting for sync services to start')
            exit_task = await auth_or_exit.spawn(exit, auth_or_exit)
            conn_task = await auth_or_exit.spawn(connect)

        connection = conn_task.result
        clog.debug(str(connection))
        return connection
Example #18
    async def flow(self, *tasks):
        async with TaskGroup(tasks=tasks) as ws:
            incoming = await ws.spawn(self._handle_incoming)
            outgoing = await ws.spawn(self._handle_outgoing)
            finished = await ws.next_done()

            if finished is incoming:
                await self.outgoing.put(None)
                await outgoing.join()
            elif finished in tasks:
                # Task is finished.
                # We ask for the outgoing to finish
                if finished.exception:
                    await self.close(1011, 'Task died prematurely.')
                else:
                    await self.close()
                await outgoing.join()
Example #19
async def open_tcp_stream(hostname, port, delay=0.3):
    # Get all of the possible targets for a given host/port
    targets = await socket.getaddrinfo(hostname, port, type=socket.SOCK_STREAM)
    if not targets:
        raise OSError(f'nothing known about {hostname}:{port}')

    # Cluster the targets into unique address families (e.g., AF_INET, AF_INET6, etc.)
    # and make sure the first entries are from a different family.
    families = [
        list(g) for _, g in itertools.groupby(targets, key=lambda t: t[0])
    ]
    targets = [fam.pop(0) for fam in families]
    targets.extend(itertools.chain(*families))

    # List of accumulated errors to report in case of total failure
    errors = []

    # Task group to manage a collection of concurrent tasks.
    # Cancels all remaining once an interesting result is returned.
    async with TaskGroup(wait=object) as group:

        # Attempt to make a connection request
        async def try_connect(sockargs, addr, errors):
            sock = socket.socket(*sockargs)
            try:
                await sock.connect(addr)
                return sock
            except Exception as e:
                await sock.close()
                errors.append(e)

        # Walk the list of targets and try connections with a staggered delay
        for *sockargs, _, addr in targets:
            await group.spawn(try_connect, sockargs, addr, errors)
            async with ignore_after(delay):
                task = await group.next_done()
                if not task.exception:
                    group.completed = task
                    break

    if group.completed:
        return group.completed.result
    else:
        raise OSError(errors)
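A small usage sketch for the staggered-connect helper above (hostname chosen purely for illustration):

import curio

async def demo():
    sock = await open_tcp_stream('www.python.org', 80)
    print('connected to', sock.getpeername())
    await sock.close()

if __name__ == '__main__':
    curio.run(demo)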
Example #20
async def ws_adapter(in_q, out_q, client, _):
    """A simple, queue-based Curio-Sans-IO websocket bridge."""
    client.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
    wsconn = WSConnection(SERVER)
    closed = False

    while not closed:
        wstask = await spawn(client.recv, 65535)
        outqtask = await spawn(out_q.get)

        async with TaskGroup([wstask, outqtask]) as g:
            task = await g.next_done()
            result = await task.join()
            await g.cancel_remaining()

        if task is wstask:
            wsconn.receive_bytes(result)

            for event in wsconn.events():
                cl = event.__class__
                if cl in DATA_TYPES:
                    await in_q.put(event.data)
                elif cl is ConnectionRequested:
                    # Auto accept. Maybe consult the handler?
                    wsconn.accept(event)
                elif cl is ConnectionClosed:
                    # The client has closed the connection.
                    await in_q.put(None)
                    closed = True
                else:
                    print(event)
            await client.sendall(wsconn.bytes_to_send())
        else:
            # We got something from the out queue.
            if result is None:
                # Terminate the connection.
                print("Closing the connection.")
                wsconn.close()
                closed = True
            else:
                wsconn.send_data(result)
            payload = wsconn.bytes_to_send()
            await client.sendall(payload)
    print("Bridge done.")
Example #21
async def manager(chan, syncword):
    encoded = syncword.encode()
    currently_running_urls = set()

    async def listen_for_new_conns(task_group):
        while True:
            ch = Channel(chan)
            try:
                connection = await ch.accept(authkey=encoded)
                mlog.info(f'new connection created {connection}')
                await task_group.spawn(manage_single_connection, connection,
                                       currently_running_urls)
                await ch.close()  # sort of strange that we need this? can we connect again later!?
            except ConnectionResetError as e:
                mlog.warning(
                    'client connection attempt did not terminate properly')

    async with TaskGroup(name='manager') as connection_tasks:
        await connection_tasks.spawn(exit, connection_tasks)
        await connection_tasks.spawn(listen_for_new_conns, connection_tasks)
Example #22
async def socks5_handle(client, addr):
    print('accept:', addr)
    async with client:
        data = await client.recv(3)
        assert data == b'\x05\x01\x00', 'auth err'
        await client.sendall(b'\x05\x00')
        data = await client.recv(5)
        assert data[:4] == b'\x05\x01\x00\x03', 'addr1 err'
        ln = data[4]
        data = await client.recv(data[4] + 2)
        remote = socket(AF_INET, SOCK_STREAM)
        host, port = '', 16813
        await remote.connect((host, port))
        addr_data = b'\x03' + bytes([ln]) + data  # the length field must stay a single byte
        enc = encryptx.Encryptor('woshimima1234', 'aes-256-cfb')
        await remote.sendall(enc.encrypt(addr_data))
        await client.sendall(b'\x05\x00\x00\x01\x00\x00\x00\x00\x10\x10')
        async with TaskGroup() as group:
            await group.spawn(pipe, client, remote, enc.encrypt)
            await group.spawn(pipe, remote, client, enc.decrypt)
            print('pipe: %r <-> %r' %
                  tuple([s.getsockname() for s in (client, remote)]))
    print('closed:', addr)
Example #23
async def main():
    logger.info("Start make_stop_flag_file_first.....")
    await sleep(5)
    # Reload the vms configuration file
    await vmsH.reload_vms_config_info()
    # Dynamically fetch the VMS configuration contents
    app_vmjob_list.clear()

    local_mac_address = VMSModifyHandler.get_local_mac_address()

    had_record_mac_address_list = []
    for one_config in vmsH.get_vms_configs():
        config_mac_address = one_config['macAddress']
        logger.info('localMacAddress={0}, configMacAddress={1}'.format(
            local_mac_address, config_mac_address))
        if config_mac_address != '':
            # config_mac_address now supports multiple MAC addresses
            all_config_mac_address = config_mac_address.split(';')
            for iter_mac_address in all_config_mac_address:
                if iter_mac_address != '':
                    if iter_mac_address not in had_record_mac_address_list \
                            and iter_mac_address == local_mac_address:
                        had_record_mac_address_list.append(iter_mac_address)

    logger.info('had_record_mac_address_list = {0}'.format(
        had_record_mac_address_list))

    for one_config in vmsH.get_vms_configs():
        config_mac_address = one_config['macAddress']

        enable_add = False
        if local_mac_address in had_record_mac_address_list:
            enable_add = find_in_record_mac_address_list(
                config_mac_address, had_record_mac_address_list)
        elif config_mac_address == '':
            enable_add = local_mac_address not in had_record_mac_address_list

        if enable_add:
            logger.info('call app_vmjob_list.add function')
            app_vmjob_list.add(
                VMJob(
                    vmid=one_config['vmid'],
                    vmname=one_config['vmname'],
                    enable=one_config['enable'] == 'true',
                    enable_ads=one_config['enable_ads'] == 'true',
                    start_cmd=one_config['startCommand'],
                    appium_cmd=one_config['appiumCommand'],
                    max_run_time=get_best_max_run_time(),  # 60 * 5 * 1000  # milliseconds
                    extend_vm_info=one_config['extend_vm_info']))

    if len(app_vmjob_list) == 0:
        logger.info('No vmjob configured ...')
        exit(9)

    logger.info("Start working..... vmjob count = {0}".format(
        len(app_vmjob_list)))
    async with TaskGroup() as g:
        await g.spawn(dispatcher)
        # await g.spawn(subscriber, 'child1')
        # await g.spawn(subscriber, 'child2')
        await g.spawn(subscriber, 'VMS-Monitor')
        ptask = await g.spawn(producer)
        await ptask.join()
        await g.cancel_remaining()
Example #24
 async def main(c1, c2):
     async with TaskGroup() as g:
         await g.spawn(server, c1)
         await g.spawn(client, c2)
Example #25
File: chat.py  Project: luvjoey1996/curio
async def chat_server(host, port):
    async with TaskGroup() as g:
        await g.spawn(dispatcher)
        await g.spawn(tcp_server, host, port, chat_handler)
Example #26
File: crawl.py  Project: sjl421/asyncrawl
async def main(addr, room_ids):
    async with TaskGroup() as g:
        for i in room_ids:
            await g.spawn(get_danmu(addr, i))
Example #27
async def multi_probe(domains: Iterable[str]) -> AsyncIterator[Result]:
    async with TaskGroup() as group:
        for domain in domains:
            await group.spawn(probe, domain)
        async for task in group:
            yield task.result
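probe and Result are not shown; a hedged sketch, assuming Result is a (domain, found) named tuple and treating "found" as "the name resolves", using curio.socket's awaitable getaddrinfo:

from socket import gaierror
from typing import NamedTuple
from curio import socket

class Result(NamedTuple):
    domain: str
    found: bool

async def probe(domain: str) -> Result:
    # A domain counts as found if DNS resolution succeeds.
    try:
        await socket.getaddrinfo(domain, None)
    except gaierror:
        return Result(domain, False)
    return Result(domain, True)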