Example #1
async def test_serve_listeners_connection_nursery(autojump_clock):
    listener = MemoryListener()

    async def handler(stream):
        await trio.sleep(1)

    class Done(Exception):
        pass

    async def connection_watcher(*, task_status=trio.TASK_STATUS_IGNORED):
        async with trio.open_nursery() as nursery:
            task_status.started(nursery)
            await wait_all_tasks_blocked()
            assert len(nursery.child_tasks) == 10
            raise Done

    with pytest.raises(Done):
        async with trio.open_nursery() as nursery:
            handler_nursery = await nursery.start(connection_watcher)
            await nursery.start(
                partial(
                    trio.serve_listeners,
                    handler, [listener],
                    handler_nursery=handler_nursery
                )
            )
            for _ in range(10):
                nursery.start_soon(listener.connect)
Example #2
async def main():
    """
    This example showcases `as_completed` which is not available in trio
    however we can mimic its behaviour using a trio.Queue storing the results
    of each coroutine. Then reading indefinitely from it until no more tasks
    are scheduled.
    """
    start = time.time()
    q = trio.Queue(MAX_CLIENTS)

    async def jockey(coro, i):
        r = await coro(i)
        await q.put(r)

    async with trio.open_nursery() as nursery:
        """
        Note important aspect of Trio, this parent block is itself a task.
        It may not have any code after the `start_soon` calls, which means
        the parent task won't do any work and simply wait for child tasks.
        But if it does include a checkpoint (the await q.get() in this case)
        the code will run like any other task interleaving with the child ones.
        """
        for i in range(1, MAX_CLIENTS + 1):
            nursery.start_soon(jockey, fetch_async, i)

        count = 0
        while True:
            print('* Parent: Checking for results on queue')
            r = await q.get()
            print('{} {}'.format(">>" * (count + 1), r))
            count += 1
            if not nursery.child_tasks:
                break

    print("Process took: {:.2f} seconds".format(time.time() - start))
Example #3
    async def job(self) -> None:
        LONG_RUNNING_THRESHOLD = 0.1
        CANCEL_THRESHOLD = 10

        async with self._lock:
            job = None  # TODO identify the job
            nursery = None
            manually_cancelled = False
            begin = trio.current_time()
            try:
                async with trio.open_nursery() as nursery:
                    nursery.cancel_scope.deadline = begin + CANCEL_THRESHOLD

                    @nursery.start_soon
                    async def warn_long_running():
                        await trio.sleep(LONG_RUNNING_THRESHOLD)
                        logger.warning("Long running job on server loop: %s", job)

                    yield

                    # cancel the warning task
                    manually_cancelled = True
                    nursery.cancel_scope.cancel()
            finally:
                assert nursery is not None

                end = trio.current_time()
                if nursery.cancel_scope.cancelled_caught and not manually_cancelled:
                    logger.error("Long running job cancelled after %.1f ms: %s", (end - begin) * 1000, job)
                    raise trio.TooSlowError
                elif end - begin > LONG_RUNNING_THRESHOLD:
                    logger.warning("Long running job finished after %.1f ms: %s", (end - begin) * 1000, job)
Example #4
async def blocking_read_with_timeout(fd, count, timeout):
    print("reading from fd", fd)
    cancel_requested = False

    async def kill_it_after_timeout(new_fd):
        print("sleeping")
        await trio.sleep(timeout)
        print("breaking the fd")
        os.dup2(bad_socket.fileno(), new_fd, inheritable=False)
        # MAGIC
        print("setuid(getuid())")
        os.setuid(os.getuid())
        nonlocal cancel_requested
        cancel_requested = True

    new_fd = os.dup(fd)
    print("working fd is", new_fd)
    try:
        async with trio.open_nursery() as nursery:
            nursery.start_soon(kill_it_after_timeout, new_fd)
            try:
                data = await trio.run_sync_in_worker_thread(os.read, new_fd, count)
            except OSError as exc:
                if cancel_requested and exc.errno == errno.ENOTCONN:
                    # Call was successfully cancelled. In a real version we'd
                    # integrate properly with trio's cancellation tools; here
                    # we'll just raise an arbitrary error.
                    raise BlockingReadTimeoutError from None
                # any other OSError is unexpected: re-raise so it isn't
                # silently swallowed (which would leave `data` unbound)
                raise
            print("got", data)
            nursery.cancel_scope.cancel()
            return data
    finally:
        os.close(new_fd)
Example #5
async def test_broadcast_channel(autojump_clock):
    with assertPassed(2):
        async with BroadcastChannel() as broadcast, trio.open_nursery() as nursery:
            receive_a = broadcast.add_receiver(10)
            receive_b = broadcast.add_receiver(10)

            @nursery.start_soon
            async def sender():
                async for value in stream((1, 1), (2, 1)):
                    await broadcast.send(value)
                # if this is not closed here, the receiver tasks won't finish,
                # the nursery won't finish, and thus the BroadcastChannel won't exit on its own
                await broadcast.aclose()

            @nursery.start_soon
            async def receiver_a():
                async with Stream(receive_a) as s:
                    await s.expect_after(1, 1)
                    await s.expect_after(1, 2)
                    # wait for the stream to end
                    await s.expect_exit_after(0)

            @nursery.start_soon
            async def receiver_b():
                async with Stream(receive_b) as s:
                    await s.expect_after(1, 1)
Example #6
    async def send(self, value: T) -> None:
        """\
        Sends the value to all receiver channels.
        Closed receivers are removed the next time a value is sent using this method.
        This method starts sending to all receivers immediately,
        but it blocks until the message has gone out to all receivers.

        Suppose you have receivers A and B with buffer size zero, and you send to them:

            await channel.send(1)
            await channel.send(2)

        If only B is actually reading, then `send(2)` will not be called, because `send(1)` can't finish,
        meaning the `2` is not delivered to B either.
        To prevent this, close any receivers that are done, and/or poll receive in a timely manner.
        """
        broken = set()

        async def send(channel):
            try:
                await channel.send(value)
            except trio.BrokenResourceError:
                await channel.aclose()
                broken.add(channel)

        async with trio.open_nursery() as nursery:
            for channel in self._send_channels:
                nursery.start_soon(send, channel)

        self._send_channels -= broken
        broken.clear()
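
A hedged usage sketch for the channel above (the `add_receiver`/`send`/`aclose` API is assumed from this and the previous example):

    async def demo():
        async with BroadcastChannel() as broadcast, trio.open_nursery() as nursery:
            receive = broadcast.add_receiver(10)

            @nursery.start_soon
            async def consumer():
                # drains its own buffered copy of every value
                async for value in receive:
                    print("got", value)

            await broadcast.send(1)
            await broadcast.send(2)
            # closing ends the receive loop so the nursery can exit
            await broadcast.aclose()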
Example #7
File: vault.py Project: syncrypt/client
    async def run(self, do_init, do_push, task_status=trio.TASK_STATUS_IGNORED):
        assert self.nursery is None
        async with trio.open_nursery() as nursery:
            self.logger.debug("Opened nursery")
            self.nursery = nursery

            try:
                self.vault.check_existence()
                self.vault.identity.read()
                self.vault.identity.assert_initialized()
            except IdentityNotInitialized:
                self.logger.info("Identity not yet initialized.")
                await self.app.set_vault_state(self.vault, VaultState.UNINITIALIZED)
            except SyncryptBaseException:
                self.logger.exception("Failure during vault initialization")
                await self.app.set_vault_state(self.vault, VaultState.FAILURE)

            self.logger.debug("Finished vault initialization successfully.")
            task_status.started()

            if do_init:
                await self.app.init_vault(self.vault)

            if do_push:
                await self.app.pull_vault(self.vault, full=do_init)
                await self.app.push_vault(self.vault)

            if self.update_on_idle:
                self.nursery.start_soon(self.respond_to_file_changes)
                self.nursery.start_soon(self.watchdog_task)
                self.nursery.start_soon(self.autopull_vault_task)
            await trio.sleep_forever()
        self.logger.debug("Closed nursery")
        self.nursery = None
Example #8
File: syncrypt.py Project: syncrypt/client
    async def pull_vault(self, vault, full=False):
        assert vault.state != VaultState.SYNCING

        vault.logger.info('Pulling %s', vault)

        # First, we will iterate through the changes, validate the chain and build up the state of
        # the vault (files, keys, ...). This is called "syncing".
        await self.sync_vault(vault, full=full)

        await self.set_vault_state(vault, VaultState.SYNCING)
        # Then, we will do a change detection for the local folder and download every bundle that
        # has changed.
        # TODO: do a change detection (.vault/metadata store vs filesystem)
        limit = trio.CapacityLimiter(1)

        try:
            # here we could use trimeter to allow for parallel processing
            async with trio.open_nursery():
                async for bundle in self.bundles.download_bundles_for_vault(vault):
                    async with limit:
                        await self.pull_bundle(bundle)

                await self.set_vault_state(vault, VaultState.READY)
        except Exception:
            vault.logger.exception("Failure while pulling vault")
            await self.set_vault_state(vault, VaultState.FAILURE)
Example #9
async def asynchronous():
    start = time.time()
    async with trio.open_nursery() as nursery:
        for i in range(1, MAX_CLIENTS + 1):
            nursery.start_soon(fetch_async, i)

    print("Process took: {:.2f} seconds".format(time.time() - start))
Example #10
File: syncrypt.py Project: syncrypt/client
    async def push_vault(self, vault):
        "Push a single vault"

        logger.info('Pushing %s', vault)

        try:
            self.identity.assert_initialized()

            await self.sync_vault(vault)
            limit = trio.CapacityLimiter(1)


            await self.set_vault_state(vault, VaultState.SYNCING)
            async with trio.open_nursery() as nursery:
                await vault.backend.open()
                await self.update_vault_metadata(vault)

                async for bundle in self.bundles.upload_bundles_for_vault(vault):
                    async with limit:
                        await self.push_bundle(bundle)
                        #nursery.start_soon(self.push_bundle, bundle)

                await self.set_vault_state(vault, VaultState.READY)
        except Exception:
            vault.logger.exception("Failure during vault push")
            await self.set_vault_state(vault, VaultState.FAILURE)
Example #11
File: parallel.py Project: ziirish/burp-ui
        async def _do_stuff():
            nonlocal client
            nonlocal number
            nonlocal forward
            nonlocal deep
            bucket1 = []
            bucket2 = []
            ret = {}
            query = await self._async_status('c:{0}:b:{1}\n'.format(client, number))
            if not query:
                return ret
            try:
                logs = query['clients'][0]['backups'][0]['logs']['list']
            except KeyError:
                self.logger.warning('No logs found')
                return ret
            async with trio.open_nursery() as nursery:
                if 'backup_stats' in logs:
                    nursery.start_soon(self._async_parse_backup_stats, number, client, forward, None, bucket1)
                if 'backup' in logs and deep:
                    nursery.start_soon(self._async_parse_backup_log, number, client, bucket2)

            if bucket1:
                ret = bucket1[0]
            if bucket2:
                ret.update(bucket2[0])

            ret['encrypted'] = False
            if 'files_enc' in ret and ret['files_enc']['total'] > 0:
                ret['encrypted'] = True
            return ret
Example #12
async def test_basic_structs(mock_clock):
    mock_clock.autojump_threshold = 0.1
    async with server(tree=basic_tree, options={'slow_every': [0, 1], 'busy_every': [0, 0, 1],
                                                'close_every': [0, 0, 0, 1]}) as ow:
        await trio.sleep(0)
        dev = await ow.get_device("10.345678.90")
        await ow.ensure_struct(dev)
        assert await dev.temperature == 12.5
        await dev.set_temphigh(98.25)
        assert await dev.temphigh == 98.25

        # while we're at it, test our ability to do things in parallel on a broken server
        dat = {}
        evt = trio.Event()

        async def get_val(tag):
            await evt.wait()
            dat[tag] = await getattr(dev, tag)

        async with trio.open_nursery() as n:
            n.start_soon(get_val, 'temperature')
            n.start_soon(get_val, 'temphigh')
            n.start_soon(get_val, 'templow')
            await trio.sleep(1)
            evt.set()
        assert dat == {'temphigh': 98.25, 'temperature': 12.5, 'templow': 10.0}, dat
Example #13
File: parallel.py Project: ziirish/burp-ui
    async def _async_get_all_clients(self, agent=None, deep=True):
        ret = []
        query = await self._async_status()
        if not query or 'clients' not in query:
            return ret

        async def __compute_client_data(client, queue, limit):
            async with limit:
                cli = {}
                cli['name'] = client['name']
                cli['state'] = self._status_human_readable(client['run_status'])
                infos = client['backups']
                if cli['state'] in ['running']:
                    cli['last'] = 'now'
                elif not infos:
                    cli['last'] = 'never'
                else:
                    infos = infos[0]
                    if deep:
                        logs = await self._async_get_backup_logs(infos['number'], client['name'])
                        cli['last'] = logs['start']
                    else:
                        cli['last'] = infos['timestamp']
                queue.append(cli)

        clients = query['clients']
        limiter = trio.CapacityLimiter(self.concurrency)

        async with trio.open_nursery() as nursery:
            for client in clients:
                nursery.start_soon(__compute_client_data, client, ret, limiter)

        return ret
Example #14
    async def __aenter__(self):
        await super().__aenter__()
        nursery = await self._stack.enter_async_context(trio.open_nursery())

        @nursery.start_soon
        async def emit_updates():
            async for update in self._updates:
                self._enqueue_update(update)
Example #15
async def main():
    """
    This example showcases how exceptions in Trio bubble to the parent
    and will stop the program, while in asyncio it will simply log a warning.
    """
    async with trio.open_nursery() as nursery:
        for service in SERVICES:
            nursery.start_soon(fetch_ip, service)
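
For contrast, a rough asyncio equivalent of the above: a failing `fetch_ip` task would only produce a "Task exception was never retrieved" warning instead of stopping the program (sketch, assuming the same `SERVICES` and `fetch_ip`):

import asyncio

async def main_asyncio():
    tasks = [asyncio.ensure_future(fetch_ip(service)) for service in SERVICES]
    # keep references so the tasks aren't garbage collected mid-run; their
    # exceptions stay inside the Task objects and are never re-raised here
    await asyncio.sleep(1)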
Example #16
    async def run(self, *, task_status=trio.TASK_STATUS_IGNORED):
        with DealerRouterSocket(self.ctx, zmq.ROUTER, side=ServerSide) as self.socket:
            async with trio.open_nursery() as self._nursery:
                self.socket.bind(self.endpoint)
                task_status.started()

                await self._nursery.start(self._requests_task)
        logger.info("Server stopped")
Example #17
File: __init__.py Project: M-o-a-T/qbroker
async def open_broker(*args, **kwargs):
    """\
        Context manager to create a restarting AMQP connection.
        """
    from .broker import Broker
    async with trio.open_nursery() as nursery:
        async with Broker(*args, nursery=nursery, **kwargs) as b:
            yield b
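
Since it yields, `open_broker` is presumably decorated as an async context manager in the original project; a hedged usage sketch (the AMQP URL is hypothetical):

async def main():
    async with open_broker("amqp://localhost") as broker:
        ...  # use the broker; the nursery keeps the connection restarting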
Example #18
async def test_serve_tcp():
    async def handler(stream):
        await stream.send_all(b"x")

    async with trio.open_nursery() as nursery:
        listeners = await nursery.start(serve_tcp, handler, 0)
        stream = await open_stream_to_socket_listener(listeners[0])
        async with stream:
            assert await stream.receive_some(1) == b"x"
            nursery.cancel_scope.cancel()
Example #19
async def parent():
    print("parent: connecting to 127.0.0.1:{}".format(PORT))
    client_stream = await trio.open_tcp_stream("127.0.0.1", PORT)
    async with client_stream:
        async with trio.open_nursery() as nursery:
            print("parent: spawning sender...")
            nursery.start_soon(sender, client_stream)

            print("parent: spawning receiver...")
            nursery.start_soon(receiver, client_stream)
Example #20
    async def __aenter__(self):
        await super().__aenter__()

        self.reader, self.writer = await self._stack.enter_async_context(open_serial_connection(self.controller.serial))

        await self._stack.enter_async_context(self._replies_in)
        await self._stack.enter_async_context(self._replies_out)

        nursery = await self._stack.enter_async_context(trio.open_nursery())
        await nursery.start(self._receiver)
Example #21
    async def start_server(endpoint: str='inproc://controller', *,
                           handler_dict: handlers.HandlerCallbackDict=handler_dict):
        async with trio_aio_loop(), zmq_trio_ctx() as ctx, hardware_adapter, trio.open_nursery() as nursery:
            server = HedgehogServer(ctx, endpoint, handler_dict)
            await nursery.start(server.run)

            yield server

            # if an exception leads to this line being skipped, the nursery kills the server anyway
            server.stop()
Example #22
File: syncrypt.py Project: syncrypt/client
    async def push(self):
        "Push all registered vaults"
        async with trio.open_nursery() as nursery:
            for vault in self.vaults:
                if not self.identity.is_initialized():
                    logger.error('Identity is not initialized yet')
                    await self.set_vault_state(vault, VaultState.UNINITIALIZED)
                    continue

                nursery.start_soon(self.push_vault, vault)
Example #23
File: monitor.py Project: ziirish/burp-ui
    async def run(self):
        async with self.pool:
            try:
                async with trio.open_nursery() as nursery:
                    # listen to connections as soon as possible
                    nursery.start_soon(self._run)
                    # in parallel we start to populate the pool
                    nursery.start_soon(self.fill_pool)
            except KeyboardInterrupt:
                pass
Example #24
async def handle_request(tag):
    # Write to task-local storage:
    request_info.set(tag)

    log("Request handler started")
    await trio.sleep(random.random())
    async with trio.open_nursery() as nursery:
        nursery.start_soon(concurrent_helper, "a")
        nursery.start_soon(concurrent_helper, "b")
    await trio.sleep(random.random())
    log("Request received finished")
Example #25
File: parallel.py Project: ziirish/burp-ui
    async def _async_get_all_backup_logs(self, client, forward=False, deep=False):
        ret = []
        backups = await self._async_get_client(client)
        queue = []
        limit = trio.CapacityLimiter(self.concurrency)
        async with trio.open_nursery() as nursery:
            for back in backups:
                nursery.start_soon(self._async_get_backup_logs, back['number'], client, forward, deep, queue, limit)

        ret = sorted(queue, key=lambda x: x['number'])
        return ret
Example #26
async def main():
    """
    Another subtlety, exceptions in Trio bubble up to the parent, and are
    raised *by the nursery* on exiting the context manager. This example would
    not work if the `try..except` was only wrapping the `start_soon`.
    """
    try:
        async with trio.open_nursery() as nursery:
            for service in SERVICES:
                nursery.start_soon(fetch_ip, service)
    except Exception as e:
        print('Unexpected error {}'.format(e))
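
A contrast sketch for the note above: wrapping only the `start_soon` call catches nothing, because `start_soon` returns immediately and the child's exception is re-raised later, when the nursery block exits:

async def broken():
    async with trio.open_nursery() as nursery:
        for service in SERVICES:
            try:
                nursery.start_soon(fetch_ip, service)
            except Exception:
                print('never reached: start_soon only schedules the task')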
Example #27
    async def do_tests(parent_nursery):
        async with trio.open_nursery() as nursery:
            for listener in listeners:
                for _ in range(3):
                    nursery.start_soon(client, listener)

        await wait_all_tasks_blocked()

        # verifies that all 6 streams x 2 directions each were closed ok
        assert len(record) == 12

        parent_nursery.cancel_scope.cancel()
Example #28
async def parent():
    print("parent: started!")
    async with trio.open_nursery() as nursery:
        print("parent: spawning child1...")
        nursery.start_soon(child1)

        print("parent: spawning child2...")
        nursery.start_soon(child2)

        print("parent: waiting for children to finish...")
        # -- we exit the nursery block here --
    print("parent: all done!")
Example #29
async def subscription_streamer(items):
    async with trio.open_nursery() as nursery:
        subs = SubscriptionStreamer()

        @nursery.start_soon
        async def the_stream():
            async with streamcontext(stream(*items)) as streamer:
                async for item in streamer:
                    await subs.send(item)
            await subs.close()

        yield subs
        nursery.cancel_scope.cancel()
Example #30
async def ssl_echo_server_raw(**kwargs):
    a, b = stdlib_socket.socketpair()
    async with trio.open_nursery() as nursery:
        # Exiting the 'with a, b' context manager closes the sockets, which
        # causes the thread to exit (possibly with an error), which allows the
        # nursery context manager to exit too.
        with a, b:
            nursery.start_soon(
                trio.run_sync_in_worker_thread,
                partial(ssl_echo_serve_sync, b, **kwargs)
            )

            await yield_(SocketStream(tsocket.from_stdlib_socket(a)))
Example #31
async def main_async(options, stdout_log_handler):

    # Get paths
    cachepath = options.cachepath

    backend_factory = get_backend_factory(options)
    backend_pool = BackendPool(backend_factory)
    atexit.register(backend_pool.flush)

    # Retrieve metadata
    with backend_pool() as backend:
        (param, db) = get_metadata(backend, cachepath)

    #if param['max_obj_size'] < options.min_obj_size:
    #    raise QuietError('Maximum object size must be bigger than minimum object size.',
    #                     exitcode=2)

    # Handle --cachesize
    rec_cachesize = options.max_cache_entries * param['max_obj_size'] / 2
    avail_cache = shutil.disk_usage(os.path.dirname(cachepath))[2] / 1024
    if options.cachesize is None:
        options.cachesize = min(rec_cachesize, 0.8 * avail_cache)
        log.info('Setting cache size to %d MB', options.cachesize / 1024)
    elif options.cachesize > avail_cache:
        log.warning('Requested cache size %d MB, but only %d MB available',
                    options.cachesize / 1024, avail_cache / 1024)

    if options.nfs:
        # NFS may try to look up '..', so we have to speed up this kind of query
        log.info('Creating NFS indices...')
        db.execute(
            'CREATE INDEX IF NOT EXISTS ix_contents_inode ON contents(inode)')

    else:
        db.execute('DROP INDEX IF EXISTS ix_contents_inode')

    metadata_upload_task = MetadataUploadTask(backend_pool, param, db,
                                              options.metadata_upload_interval)
    block_cache = BlockCache(backend_pool, db, cachepath + '-cache',
                             options.cachesize * 1024,
                             options.max_cache_entries)
    commit_task = CommitTask(block_cache)
    operations = fs.Operations(block_cache,
                               db,
                               max_obj_size=param['max_obj_size'],
                               inode_cache=InodeCache(db, param['inode_gen']),
                               upload_task=metadata_upload_task)
    block_cache.fs = operations
    metadata_upload_task.fs = operations

    async with trio.open_nursery() as nursery:
        with ExitStack() as cm:
            log.info('Mounting %s at %s...', options.storage_url,
                     options.mountpoint)
            try:
                pyfuse3.init(operations, options.mountpoint,
                             get_fuse_opts(options))
            except RuntimeError as exc:
                raise QuietError(str(exc), exitcode=39)

            unmount_clean = False

            def unmount():
                log.info("Unmounting file system...")
                pyfuse3.close(unmount=unmount_clean)

            cm.callback(unmount)

            if options.fg or options.systemd:
                faulthandler.enable()
                faulthandler.register(signal.SIGUSR1)
            else:
                if stdout_log_handler:
                    logging.getLogger().removeHandler(stdout_log_handler)
                crit_log_fd = os.open(
                    os.path.join(options.cachedir, 'mount.s3ql_crit.log'),
                    flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY,
                    mode=0o644)
                faulthandler.enable(crit_log_fd)
                faulthandler.register(signal.SIGUSR1, file=crit_log_fd)
                daemonize(options.cachedir)

            mark_metadata_dirty(backend, cachepath, param)

            block_cache.init(options.threads)

            nursery.start_soon(metadata_upload_task.run,
                               name='metadata-upload-task')
            cm.callback(metadata_upload_task.stop)

            nursery.start_soon(commit_task.run, name='commit-task')
            cm.callback(commit_task.stop)

            exc_info = setup_exchook()

            if options.systemd:
                import systemd.daemon
                systemd.daemon.notify('READY=1')

            ret = None
            try:
                toggle_int_signal_handling(True)
                ret = await pyfuse3.main()
            except KeyboardInterrupt:
                # re-block SIGINT before log.info() call to reduce the possibility for a second KeyboardInterrupt
                toggle_int_signal_handling(False)
                log.info("Got CTRL-C. Exit gracefully.")
            finally:
                # For a clean unmount we need to ignore any repeated SIGINTs from here
                toggle_int_signal_handling(False)
                await operations.destroy()
                await block_cache.destroy(options.keep_cache)

            if ret is not None:
                raise RuntimeError('Received signal %d, terminating' % (ret, ))

            # Re-raise if main loop terminated due to exception in other thread
            if exc_info:
                (exc_inst, exc_tb) = exc_info
                raise exc_inst.with_traceback(exc_tb)

            log.info("FUSE main loop terminated.")

            unmount_clean = True

    # At this point, there should be no other threads left

    # Do not update .params yet, dump_metadata() may fail if the database is
    # corrupted, in which case we want to force an fsck.
    if operations.failsafe:
        log.warning('File system errors encountered, marking for fsck.')
        param['needs_fsck'] = True
    with backend_pool() as backend:
        seq_no = get_seq_no(backend)
        if metadata_upload_task.db_mtime == os.stat(cachepath +
                                                    '.db').st_mtime:
            log.info('File system unchanged, not uploading metadata.')
            del backend['s3ql_seq_no_%d' % param['seq_no']]
            param['seq_no'] -= 1
            save_params(cachepath, param)
        elif seq_no == param['seq_no']:
            param['last-modified'] = time.time()
            dump_and_upload_metadata(backend, db, param)
            save_params(cachepath, param)
        else:
            log.error(
                'Remote metadata is newer than local (%d vs %d), '
                'refusing to overwrite!', seq_no, param['seq_no'])
            log.error(
                'The locally cached metadata will be *lost* the next time the file system '
                'is mounted or checked and has therefore been backed up.')
            for name in (cachepath + '.params', cachepath + '.db'):
                for i in range(4)[::-1]:
                    if os.path.exists(name + '.%d' % i):
                        os.rename(name + '.%d' % i, name + '.%d' % (i + 1))
                os.rename(name, name + '.0')

    log.info('Cleaning up local metadata...')
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close()

    log.info('All done.')
Example #32
    async def run(self):
        async with trio.open_nursery() as nursery:
            await self.update_scheduler_command()
            await self.track_reference()
Example #33
    async def _run(self, task_status: trio_typing.TaskStatus[None]) -> None:
        # This send/receive channel is fed any event/config pair that should be
        # broadcast.
        # Those messages are then retrieved from the receive channel by the
        # `_process_outbound_messages` daemon and broadcast to the appropriate
        # `RemoteEndpoint`.
        (self._outbound_send_channel,
         outbound_receive_channel) = cast(OutboundBroadcastChannelPair,
                                          trio.open_memory_channel(100))

        # This send/receive channel is fed by connected `RemoteEndpoint`
        # objects which feed received events into the send side of the channel.
        # Those messages are then retrieved from the receive channel by the
        # `_process_inbound_messages` daemon.
        (self._inbound_send_channel,
         inbound_receive_channel) = cast(WireBroadcastChannelPair,
                                         trio.open_memory_channel(100))

        # This send/receive channel is fed by
        # `Endpoint.connect_to_endpoint` which places a 2-tuple of
        # (ConnectionConfig, trio.Event) which is retrieved by
        # `_process_connections`.
        (self._connection_send_channel,
         connection_receive_channel) = cast(ConnectionChannelPair,
                                            trio.open_memory_channel(100))

        self.logger.debug("%s: starting", self)

        async with trio.open_nursery() as nursery:
            #
            # _process_outbound_messages:
            #     Manages a channel which all outgoing
            #     event broadcasts are placed on, running them each through the
            #     appropriate `RemoteEndpoint.send_message`
            #
            nursery.start_soon(self._process_outbound_messages,
                               outbound_receive_channel)

            #
            # _process_inbound_messages:
            #     Manages a channel which all incoming
            #     event broadcasts are placed on, running each event through
            #     the internal `_process_item` handler which handles all of the
            #     internal logic for the various
            #     `request/response/subscribe/stream/wait_for` API.
            #
            nursery.start_soon(self._process_inbound_messages,
                               inbound_receive_channel)

            #
            # _process_connections:
            #     When `Endpoint.connect_to_endpoint` is called, the actual
            #     connection process is done asynchronously by putting the
            #     `ConnectionConfig` onto a queue which this process
            #     retrieves to establish the new connection.  This includes
            #     handing the `RemoteEndpoint` object off to the `RemoteManager`
            #     which takes care of the connection lifecycle.
            #
            nursery.start_soon(self._process_connections,
                               connection_receive_channel, nursery)

            #
            # _monitor_subscription_changes
            #    Monitors an event for local changes to subscriptions to
            #    propagate those changes to remotes.
            nursery.start_soon(self._monitor_subscription_changes)

            # mark the endpoint as running.
            self._running.set()
            # tell the nursery that we are started.
            task_status.started()

            await self.wait_stopped()

            nursery.cancel_scope.cancel()
Example #34
async def handle_keyboard_input(

    searchbar: SearchBar,
    recv_chan: trio.abc.ReceiveChannel,

) -> None:

    global _search_active, _search_enabled

    # startup
    bar = searchbar
    search = searchbar.parent()
    godwidget = search.godwidget
    view = bar.view
    view.set_font_size(bar.dpi_font.px_size)

    send, recv = trio.open_memory_channel(16)

    async with trio.open_nursery() as n:

        # start a background multi-searcher task which receives
        # patterns relayed from this keyboard input handler and
        # async updates the completer view's results.
        n.start_soon(
            partial(
                fill_results,
                search,
                recv,
            )
        )

        async for kbmsg in recv_chan:
            event, etype, key, mods, txt = kbmsg.to_tuple()

            log.debug(f'key: {key}, mods: {mods}, txt: {txt}')

            ctl = False
            if mods == Qt.ControlModifier:
                ctl = True

            if key in (Qt.Key_Enter, Qt.Key_Return):

                await search.chart_current_item(clear_to_cache=True)
                _search_enabled = False
                continue

            elif not ctl and not bar.text():
                # if nothing in search text show the cache
                view.set_section_entries(
                    'cache',
                    list(reversed(godwidget._chart_cache)),
                    clear_all=True,
                )
                continue

            # cancel and close
            if ctl and key in {
                Qt.Key_C,
                Qt.Key_Space,   # i feel like this is the "native" one
                Qt.Key_Alt,
            }:
                search.bar.unfocus()

                # kill the search and focus back on main chart
                if godwidget:
                    godwidget.focus()

                continue

            if ctl and key in {
                Qt.Key_L,
            }:
                # like url (link) highlight in a web browser
                bar.focus()

            # selection navigation controls
            elif ctl and key in {
                Qt.Key_D,
            }:
                view.next_section(direction='down')
                _search_enabled = False

            elif ctl and key in {
                Qt.Key_U,
            }:
                view.next_section(direction='up')
                _search_enabled = False

            # selection navigation controls
            elif (ctl and key in {

                Qt.Key_K,
                Qt.Key_J,

            }) or key in {

                Qt.Key_Up,
                Qt.Key_Down,
            }:
                _search_enabled = False
                if key in {Qt.Key_K, Qt.Key_Up}:
                    item = view.select_previous()

                elif key in {Qt.Key_J, Qt.Key_Down}:
                    item = view.select_next()

                if item:
                    parent_item = item.parent()

                    if parent_item and parent_item.text() == 'cache':

                        # if it's a cache item, switch and show it immediately
                        await search.chart_current_item(clear_to_cache=False)

            elif not ctl:
                # relay to completer task
                _search_enabled = True
                send.send_nowait(search.bar.text())
                _search_active.set()
Example #35
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        identity_scheme_registry = default_identity_scheme_registry
        message_type_registry = default_message_type_registry

        nodedb_dir = get_nodedb_dir(boot_info)
        nodedb_dir.mkdir(exist_ok=True)
        node_db = NodeDB(default_identity_scheme_registry, LevelDB(nodedb_dir))

        local_private_key = get_local_private_key(boot_info)
        local_enr = await get_local_enr(boot_info, node_db, local_private_key)
        local_node_id = local_enr.node_id

        routing_table = KademliaRoutingTable(local_node_id,
                                             NUM_ROUTING_TABLE_BUCKETS)

        node_db.set_enr(local_enr)
        for enr_repr in boot_info.args.discovery_boot_enrs or ():
            enr = ENR.from_repr(enr_repr)
            node_db.set_enr(enr)
            routing_table.update(enr.node_id)

        port = boot_info.args.discovery_port

        socket = trio.socket.socket(
            family=trio.socket.AF_INET,
            type=trio.socket.SOCK_DGRAM,
        )
        outgoing_datagram_channels = trio.open_memory_channel[
            OutgoingDatagram](0)
        incoming_datagram_channels = trio.open_memory_channel[
            IncomingDatagram](0)
        outgoing_packet_channels = trio.open_memory_channel[OutgoingPacket](0)
        incoming_packet_channels = trio.open_memory_channel[IncomingPacket](0)
        outgoing_message_channels = trio.open_memory_channel[OutgoingMessage](
            0)
        incoming_message_channels = trio.open_memory_channel[IncomingMessage](
            0)
        endpoint_vote_channels = trio.open_memory_channel[EndpointVote](0)

        # types ignored due to https://github.com/ethereum/async-service/issues/5
        datagram_sender = DatagramSender(  # type: ignore
            outgoing_datagram_channels[1],
            socket,
        )
        datagram_receiver = DatagramReceiver(  # type: ignore
            socket,
            incoming_datagram_channels[0],
        )

        packet_encoder = PacketEncoder(  # type: ignore
            outgoing_packet_channels[1],
            outgoing_datagram_channels[0],
        )
        packet_decoder = PacketDecoder(  # type: ignore
            incoming_datagram_channels[1],
            incoming_packet_channels[0],
        )

        packer = Packer(
            local_private_key=local_private_key.to_bytes(),
            local_node_id=local_node_id,
            node_db=node_db,
            message_type_registry=message_type_registry,
            incoming_packet_receive_channel=incoming_packet_channels[1],
            incoming_message_send_channel=incoming_message_channels[0],
            outgoing_message_receive_channel=outgoing_message_channels[1],
            outgoing_packet_send_channel=outgoing_packet_channels[0],
        )

        message_dispatcher = MessageDispatcher(
            node_db=node_db,
            incoming_message_receive_channel=incoming_message_channels[1],
            outgoing_message_send_channel=outgoing_message_channels[0],
        )

        endpoint_tracker = EndpointTracker(
            local_private_key=local_private_key.to_bytes(),
            local_node_id=local_node_id,
            node_db=node_db,
            identity_scheme_registry=identity_scheme_registry,
            vote_receive_channel=endpoint_vote_channels[1],
        )

        routing_table_manager = RoutingTableManager(
            local_node_id=local_node_id,
            routing_table=routing_table,
            message_dispatcher=message_dispatcher,
            node_db=node_db,
            outgoing_message_send_channel=outgoing_message_channels[0],
            endpoint_vote_send_channel=endpoint_vote_channels[0],
        )

        logger.info(f"Starting discovery, listening on port {port}")
        logger.info(f"Local Node ID: {encode_hex(local_enr.node_id)}")
        logger.info(f"Local ENR: {local_enr}")

        await socket.bind(("0.0.0.0", port))
        services = (
            datagram_sender,
            datagram_receiver,
            packet_encoder,
            packet_decoder,
            packer,
            message_dispatcher,
            endpoint_tracker,
            routing_table_manager,
        )
        async with trio.open_nursery() as nursery:
            for service in services:
                nursery.start_soon(async_service.TrioManager.run_service,
                                   service)
Example #36
File: trio.py Project: FloFaber/Sudoku
    async def __aenter__(self):
        self._nursery_mgr = trio.open_nursery()
        nursery = await self._nursery_mgr.__aenter__()
        await self.start(nursery)
        return self
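
A matching `__aexit__` is needed to leave the manually-entered nursery; a hedged sketch (the `stop` counterpart to `start` is hypothetical):

    async def __aexit__(self, exc_type, exc, tb):
        await self.stop()
        return await self._nursery_mgr.__aexit__(exc_type, exc, tb)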
Example #37
File: full.py Project: binaryflesh/trinity
    async def _connect_preferred_nodes(self) -> None:
        async with trio.open_nursery() as nursery:
            for preferred_maddr in self._preferred_nodes:
                nursery.start_soon(self._host.add_peer_from_maddr,
                                   preferred_maddr)
Example #38
File: conn.py Project: Contextualist/grain
async def use_or_open_nursery(_n):
    if _n is not None:
        yield _n
    else:
        async with trio.open_nursery() as _n:
            yield _n
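
Assuming `use_or_open_nursery` is decorated as an async context manager in the original project, a caller can either reuse an existing nursery or let the helper open a private one (sketch; `background_task` is hypothetical):

async def connect(addr, _n=None):
    async with use_or_open_nursery(_n) as nursery:
        nursery.start_soon(background_task, addr)
        ...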
Example #39
File: _subprocess.py Project: esnyder/trio
async def run_process(command,
                      *,
                      stdin=b"",
                      capture_stdout=False,
                      capture_stderr=False,
                      check=True,
                      **options):
    """Run ``command`` in a subprocess, wait for it to complete, and
    return a :class:`subprocess.CompletedProcess` instance describing
    the results.

    If cancelled, :func:`run_process` terminates the subprocess and
    waits for it to exit before propagating the cancellation, like
    :meth:`Process.aclose`.

    **Input:** The subprocess's standard input stream is set up to
    receive the bytes provided as ``stdin``.  Once the given input has
    been fully delivered, or if none is provided, the subprocess will
    receive end-of-file when reading from its standard input.
    Alternatively, if you want the subprocess to read its
    standard input from the same place as the parent Trio process, you
    can pass ``stdin=None``.

    **Output:** By default, any output produced by the subprocess is
    passed through to the standard output and error streams of the
    parent Trio process. If you would like to capture this output and
    do something with it, you can pass ``capture_stdout=True`` to
    capture the subprocess's standard output, and/or
    ``capture_stderr=True`` to capture its standard error.  Captured
    data is provided as the
    :attr:`~subprocess.CompletedProcess.stdout` and/or
    :attr:`~subprocess.CompletedProcess.stderr` attributes of the
    returned :class:`~subprocess.CompletedProcess` object.  The value
    for any stream that was not captured will be ``None``.
    
    If you want to capture both stdout and stderr while keeping them
    separate, pass ``capture_stdout=True, capture_stderr=True``.
    
    If you want to capture both stdout and stderr but mixed together
    in the order they were printed, use ``capture_stdout=True, stderr=subprocess.STDOUT``.
    This directs the child's stderr into its stdout, so the combined
    output will be available in the :attr:`~subprocess.CompletedProcess.stdout`
    attribute.

    **Error checking:** If the subprocess exits with a nonzero status
    code, indicating failure, :func:`run_process` raises a
    :exc:`subprocess.CalledProcessError` exception rather than
    returning normally. The captured outputs are still available as
    the :attr:`~subprocess.CalledProcessError.stdout` and
    :attr:`~subprocess.CalledProcessError.stderr` attributes of that
    exception.  To disable this behavior, so that :func:`run_process`
    returns normally even if the subprocess exits abnormally, pass
    ``check=False``.

    Args:
      command (list or str): The command to run. Typically this is a
          sequence of strings such as ``['ls', '-l', 'directory with spaces']``,
          where the first element names the executable to invoke and the other
          elements specify its arguments. With ``shell=True`` in the
          ``**options``, or on Windows, ``command`` may alternatively
          be a string, which will be parsed following platform-dependent
          :ref:`quoting rules <subprocess-quoting>`.
      stdin (:obj:`bytes`, file descriptor, or None): The bytes to provide to
          the subprocess on its standard input stream, or ``None`` if the
          subprocess's standard input should come from the same place as
          the parent Trio process's standard input. As is the case with
          the :mod:`subprocess` module, you can also pass a
          file descriptor or an object with a ``fileno()`` method,
          in which case the subprocess's standard input will come from
          that file.
      capture_stdout (bool): If true, capture the bytes that the subprocess
          writes to its standard output stream and return them in the
          :attr:`~subprocess.CompletedProcess.stdout` attribute
          of the returned :class:`~subprocess.CompletedProcess` object.
      capture_stderr (bool): If true, capture the bytes that the subprocess
          writes to its standard error stream and return them in the
          :attr:`~subprocess.CompletedProcess.stderr` attribute
          of the returned :class:`~subprocess.CompletedProcess` object.
      check (bool): If false, don't validate that the subprocess exits
          successfully. You should be sure to check the
          ``returncode`` attribute of the returned object if you pass
          ``check=False``, so that errors don't pass silently.
      **options: :func:`run_process` also accepts any :ref:`general subprocess
          options <subprocess-options>` and passes them on to the
          :class:`~trio.Process` constructor. This includes the
          ``stdout`` and ``stderr`` options, which provide additional
          redirection possibilities such as ``stderr=subprocess.STDOUT``,
          ``stdout=subprocess.DEVNULL``, or file descriptors.

    Returns:
      A :class:`subprocess.CompletedProcess` instance describing the
      return code and outputs.

    Raises:
      UnicodeError: if ``stdin`` is specified as a Unicode string, rather
          than bytes
      ValueError: if multiple redirections are specified for the same
          stream, e.g., both ``capture_stdout=True`` and
          ``stdout=subprocess.DEVNULL``
      subprocess.CalledProcessError: if ``check=False`` is not passed
          and the process exits with a nonzero exit status
      OSError: if an error is encountered starting or communicating with
          the process

    .. note:: The child process runs in the same process group as the parent
       Trio process, so a Ctrl+C will be delivered simultaneously to both
       parent and child. If you don't want this behavior, consult your
       platform's documentation for starting child processes in a different
       process group.

    """

    if isinstance(stdin, str):
        raise UnicodeError("process stdin must be bytes, not str")
    if stdin == subprocess.PIPE:
        raise ValueError(
            "stdin=subprocess.PIPE doesn't make sense since the pipe "
            "is internal to run_process(); pass the actual data you "
            "want to send over that pipe instead")
    if isinstance(stdin, (bytes, bytearray, memoryview)):
        input = stdin
        options["stdin"] = subprocess.PIPE
    else:
        # stdin should be something acceptable to Process
        # (None, DEVNULL, a file descriptor, etc) and Process
        # will raise if it's not
        input = None
        options["stdin"] = stdin

    if capture_stdout:
        if "stdout" in options:
            raise ValueError("can't specify both stdout and capture_stdout")
        options["stdout"] = subprocess.PIPE
    if capture_stderr:
        if "stderr" in options:
            raise ValueError("can't specify both stderr and capture_stderr")
        options["stderr"] = subprocess.PIPE

    stdout_chunks = []
    stderr_chunks = []

    async with Process(command, **options) as proc:

        async def feed_input():
            async with proc.stdin:
                try:
                    await proc.stdin.send_all(input)
                except trio.BrokenResourceError:
                    pass

        async def read_output(stream, chunks):
            async with stream:
                while True:
                    chunk = await stream.receive_some(32768)
                    if not chunk:
                        break
                    chunks.append(chunk)

        async with trio.open_nursery() as nursery:
            if proc.stdin is not None:
                nursery.start_soon(feed_input)
            if proc.stdout is not None:
                nursery.start_soon(read_output, proc.stdout, stdout_chunks)
            if proc.stderr is not None:
                nursery.start_soon(read_output, proc.stderr, stderr_chunks)
            await proc.wait()

    stdout = b"".join(stdout_chunks) if proc.stdout is not None else None
    stderr = b"".join(stderr_chunks) if proc.stderr is not None else None

    if proc.returncode and check:
        raise subprocess.CalledProcessError(proc.returncode,
                                            proc.args,
                                            output=stdout,
                                            stderr=stderr)
    else:
        return subprocess.CompletedProcess(proc.args, proc.returncode, stdout,
                                           stderr)
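
A usage sketch based on the docstring above, capturing both streams and doing the error check by hand with ``check=False``:

import trio

async def main():
    result = await run_process(
        ["ls", "-l"], capture_stdout=True, capture_stderr=True, check=False
    )
    if result.returncode != 0:
        print("failed:", result.stderr.decode())
    else:
        print(result.stdout.decode())

trio.run(main)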
Example #40
File: cli.py Project: Contextualist/grain
    async def _ls(head):
        with _handle_connection_error(head):
            async with trio.open_nursery() as _n, \
                       SocketChannel(f"{head}", dial=True, _n=_n) as c:
                await c.send(dict(cmd="STA"))
                print((await c.receive())['result'].decode())
Example #41
async def main():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(foo)
        nursery.start_soon(bar)
Example #42
    async def parent():
        async with trio.open_nursery() as nursery:
            nursery.start_soon(sleep)
            nursery.start_soon(client)
            nursery.start_soon(sleep)
Example #43
    async def connection_watcher(*, task_status=trio.TASK_STATUS_IGNORED):
        async with trio.open_nursery() as nursery:
            task_status.started(nursery)
            await wait_all_tasks_blocked()
            assert len(nursery.child_tasks) == 10
            raise Done
Example #44
File: full.py Project: binaryflesh/trinity
    async def _handle_gossip(self) -> None:
        gossip_handlers = (self._handle_block_gossip, )
        async with trio.open_nursery() as nursery:
            for handler in gossip_handlers:
                nursery.start_soon(handler)
Example #45
    async def run(self) -> None:
        async with trio.open_nursery() as nursery:
            nursery.start_soon(self._make_measurements)
            nursery.start_soon(self._reduce_measurements)
Example #46
async def test_claimer_handle_command_failure(backend, running_backend, alice,
                                              alice_backend_cmds, monkeypatch,
                                              fail_on_step):
    invitation = await backend.invite.new_for_device(
        organization_id=alice.organization_id, greeter_user_id=alice.user_id)
    invitation_addr = BackendInvitationAddr.build(
        backend_addr=alice.organization_addr,
        organization_id=alice.organization_id,
        invitation_type=InvitationType.DEVICE,
        token=invitation.token,
    )

    async def _cancel_invitation():
        await backend.invite.delete(
            organization_id=alice.organization_id,
            greeter=alice.user_id,
            token=invitation_addr.token,
            on=pendulum_now(),
            reason=InvitationDeletedReason.CANCELLED,
        )

    async with backend_invited_cmds_factory(
            addr=invitation_addr) as claimer_cmds:
        greeter_initial_ctx = UserGreetInitialCtx(cmds=alice_backend_cmds,
                                                  token=invitation_addr.token)
        claimer_initial_ctx = await claimer_retrieve_info(claimer_cmds)

        claimer_in_progress_ctx = None
        greeter_in_progress_ctx = None

        async def _do_claimer():
            nonlocal claimer_in_progress_ctx
            if fail_on_step == "wait_peer":
                return
            claimer_in_progress_ctx = await claimer_initial_ctx.do_wait_peer()
            if fail_on_step == "signify_trust":
                return
            claimer_in_progress_ctx = await claimer_in_progress_ctx.do_signify_trust(
            )
            if fail_on_step == "wait_peer_trust":
                return
            claimer_in_progress_ctx = await claimer_in_progress_ctx.do_wait_peer_trust(
            )

        async def _do_greeter():
            nonlocal greeter_in_progress_ctx
            if fail_on_step == "wait_peer":
                return
            greeter_in_progress_ctx = await greeter_initial_ctx.do_wait_peer()
            if fail_on_step == "signify_trust":
                return
            greeter_in_progress_ctx = await greeter_in_progress_ctx.do_wait_peer_trust(
            )
            if fail_on_step == "wait_peer_trust":
                return
            greeter_in_progress_ctx = await greeter_in_progress_ctx.do_signify_trust(
            )

        with trio.fail_after(1):
            async with trio.open_nursery() as nursery:
                nursery.start_soon(_do_claimer)
                nursery.start_soon(_do_greeter)

        deleted_event = trio.Event()

        async def _send_event(*args, **kwargs):
            if BackendEvent.INVITE_STATUS_CHANGED in args and (
                    kwargs.get("status") == InvitationStatus.DELETED
                    or kwargs.get("status_str")
                    == InvitationStatus.DELETED.value):
                deleted_event.set()
            await trio.sleep(0)

        backend.invite._send_event = _send_event
        monkeypatch.setattr("parsec.backend.postgresql.invite.send_signal",
                            _send_event)

        with trio.fail_after(1):
            await _cancel_invitation()
            await deleted_event.wait()
            with pytest.raises(BackendInvitationAlreadyUsed) as exc_info:
                if fail_on_step == "wait_peer":
                    await claimer_initial_ctx.do_wait_peer()
                elif fail_on_step == "signify_trust":
                    await claimer_in_progress_ctx.do_signify_trust()
                elif fail_on_step == "wait_peer_trust":
                    await claimer_in_progress_ctx.do_wait_peer_trust()
                elif fail_on_step == "claim_device":
                    await claimer_in_progress_ctx.do_claim_device(
                        requested_device_label="TheSecretDevice")
                else:
                    raise AssertionError(f"Unknown step {fail_on_step}")
            assert str(exc_info.value
                       ) == "Invalid handshake: Invitation already deleted"
Example #47
    async def _send_random_writes(skvbc):
        with trio.move_on_after(seconds=1):
            async with trio.open_nursery() as nursery:
                nursery.start_soon(skvbc.send_indefinite_tracked_ops, 1)
Example #48
async def test_claimer_handle_cancel_event(backend, running_backend, alice,
                                           alice_backend_cmds, fail_on_step):
    invitation = await backend.invite.new_for_device(
        organization_id=alice.organization_id, greeter_user_id=alice.user_id)
    invitation_addr = BackendInvitationAddr.build(
        backend_addr=alice.organization_addr,
        organization_id=alice.organization_id,
        invitation_type=InvitationType.DEVICE,
        token=invitation.token,
    )

    async def _cancel_invitation():
        await backend.invite.delete(
            organization_id=alice.organization_id,
            greeter=alice.user_id,
            token=invitation_addr.token,
            on=pendulum_now(),
            reason=InvitationDeletedReason.CANCELLED,
        )

    async with backend_invited_cmds_factory(
            addr=invitation_addr) as claimer_cmds:
        greeter_initial_ctx = UserGreetInitialCtx(cmds=alice_backend_cmds,
                                                  token=invitation_addr.token)
        claimer_initial_ctx = await claimer_retrieve_info(claimer_cmds)

        claimer_in_progress_ctx = None
        greeter_in_progress_ctx = None

        async def _do_claimer():
            nonlocal claimer_in_progress_ctx
            if fail_on_step == "wait_peer":
                return
            claimer_in_progress_ctx = await claimer_initial_ctx.do_wait_peer()
            if fail_on_step == "signify_trust":
                return
            claimer_in_progress_ctx = await claimer_in_progress_ctx.do_signify_trust()
            if fail_on_step == "wait_peer_trust":
                return
            claimer_in_progress_ctx = await claimer_in_progress_ctx.do_wait_peer_trust()

        async def _do_greeter():
            nonlocal greeter_in_progress_ctx
            if fail_on_step == "wait_peer":
                return
            greeter_in_progress_ctx = await greeter_initial_ctx.do_wait_peer()
            if fail_on_step == "signify_trust":
                return
            greeter_in_progress_ctx = await greeter_in_progress_ctx.do_wait_peer_trust()
            if fail_on_step == "wait_peer_trust":
                return
            greeter_in_progress_ctx = await greeter_in_progress_ctx.do_signify_trust()

        with trio.fail_after(1):
            async with trio.open_nursery() as nursery:
                nursery.start_soon(_do_claimer)
                nursery.start_soon(_do_greeter)

        with trio.fail_after(1):
            async with trio.open_nursery() as nursery:

                async def _do_claimer_wait_peer():
                    with pytest.raises(
                            BackendInvitationAlreadyUsed) as exc_info:
                        await claimer_initial_ctx.do_wait_peer()
                    assert str(
                        exc_info.value
                    ) == "Invalid handshake: Invitation already deleted"

                async def _do_claimer_signify_trust():
                    with pytest.raises(
                            BackendInvitationAlreadyUsed) as exc_info:
                        await claimer_in_progress_ctx.do_signify_trust()
                    assert str(
                        exc_info.value
                    ) == "Invalid handshake: Invitation already deleted"

                async def _do_claimer_wait_peer_trust():
                    with pytest.raises(
                            BackendInvitationAlreadyUsed) as exc_info:
                        await claimer_in_progress_ctx.do_wait_peer_trust()
                    assert str(
                        exc_info.value
                    ) == "Invalid handshake: Invitation already deleted"

                async def _do_claimer_claim_device():
                    with pytest.raises(
                            BackendInvitationAlreadyUsed) as exc_info:
                        await claimer_in_progress_ctx.do_claim_device(
                            requested_device_label="TheSecretDevice")
                    assert str(
                        exc_info.value
                    ) == "Invalid handshake: Invitation already deleted"

                steps = {
                    "wait_peer": _do_claimer_wait_peer,
                    "signify_trust": _do_claimer_signify_trust,
                    "wait_peer_trust": _do_claimer_wait_peer_trust,
                    "claim_device": _do_claimer_claim_device,
                }
                _do_claimer = steps[fail_on_step]

                with backend.event_bus.listen() as spy:
                    nursery.start_soon(_do_claimer)
                    # Be sure that _do_claimer got a valid invitation before cancellation
                    await spy.wait_with_timeout(
                        BackendEvent.INVITE_CONDUIT_UPDATED)
                    await _cancel_invitation()
                    await spy.wait_with_timeout(
                        BackendEvent.INVITE_STATUS_CHANGED)
Example #49
async def test_trio_sleep(n, w):
    async with trio.open_nursery() as nursery:
        nursery.start_soon(_trio_sleep, n, w)
Example #50
async def test_claimer_handle_reset(backend, running_backend, alice,
                                    alice_backend_cmds):
    invitation = await backend.invite.new_for_device(
        organization_id=alice.organization_id, greeter_user_id=alice.user_id)
    invitation_addr = BackendInvitationAddr.build(
        backend_addr=alice.organization_addr,
        organization_id=alice.organization_id,
        invitation_type=InvitationType.DEVICE,
        token=invitation.token,
    )

    async with backend_invited_cmds_factory(
            addr=invitation_addr) as claimer_cmds:
        greeter_initial_ctx = UserGreetInitialCtx(cmds=alice_backend_cmds,
                                                  token=invitation_addr.token)
        claimer_initial_ctx = await claimer_retrieve_info(claimer_cmds)

        claimer_in_progress_ctx = None
        greeter_in_progress_ctx = None

        # Step 1
        with trio.fail_after(1):
            async with trio.open_nursery() as nursery:

                async def _do_claimer():
                    nonlocal claimer_in_progress_ctx
                    claimer_in_progress_ctx = await claimer_initial_ctx.do_wait_peer()

                async def _do_greeter():
                    nonlocal greeter_in_progress_ctx
                    greeter_in_progress_ctx = await greeter_initial_ctx.do_wait_peer()

                nursery.start_soon(_do_claimer)
                nursery.start_soon(_do_greeter)

        # Claimer restarts the conduit while the greeter tries to do step 2
        with trio.fail_after(1):
            async with trio.open_nursery() as nursery:

                async def _do_claimer():
                    nonlocal claimer_in_progress_ctx
                    claimer_in_progress_ctx = await claimer_initial_ctx.do_wait_peer()

                nursery.start_soon(_do_claimer)
                with pytest.raises(InvitePeerResetError):
                    await greeter_in_progress_ctx.do_wait_peer_trust()

                # Greeter redoes step 1
                greeter_in_progress_ctx = await greeter_initial_ctx.do_wait_peer()

        # Now the other way around: the greeter restarts the conduit while the claimer tries step 2
        with trio.fail_after(1):
            async with trio.open_nursery() as nursery:

                async def _do_greeter():
                    nonlocal greeter_in_progress_ctx
                    greeter_in_progress_ctx = await greeter_initial_ctx.do_wait_peer()

                nursery.start_soon(_do_greeter)
                with pytest.raises(InvitePeerResetError):
                    await claimer_in_progress_ctx.do_signify_trust()

                # Claimer redoes step 1
                claimer_in_progress_ctx = await claimer_initial_ctx.do_wait_peer()
Example #51
async def test_no_proxy_with_http(monkeypatch, type):
    proxy_events = []

    def _event_hook(event):
        proxy_events.append(event)

    async with trio.open_nursery() as nursery:

        if type == "no_config":
            pass
        elif type == "no_proxy_from_env":
            dont_use_proxy_port = await start_port_watchdog(
                nursery, _event_hook)
            dont_use_proxy_url = f"http://127.0.0.1:{dont_use_proxy_port}"
            monkeypatch.setitem(os.environ, "no_proxy", "*")
            # Should be ignored
            monkeypatch.setitem(os.environ, "http_proxy", dont_use_proxy_url)
            monkeypatch.setitem(os.environ, "https_proxy", dont_use_proxy_url)
        else:
            assert type == "no_proxy_from_pac"
            dont_use_proxy_port = await start_port_watchdog(
                nursery, _event_hook)
            dont_use_proxy_url = f"http://127.0.0.1:{dont_use_proxy_port}"
            pac_server_port = await start_pac_server(nursery=nursery,
                                                     pac_rule="DIRECT",
                                                     event_hook=_event_hook)
            pac_server_url = f"http://127.0.0.1:{pac_server_port}"
            monkeypatch.setitem(os.environ, "http_proxy_pac", pac_server_url)
            # Should be ignored
            monkeypatch.setitem(os.environ, "http_proxy", dont_use_proxy_url)
            monkeypatch.setitem(os.environ, "https_proxy", dont_use_proxy_url)

        async def _target_client_handler(stream):
            # To keep things simple, we assume each HTTP request/response is
            # contained in a single TCP frame. This is not strictly true in
            # real life, but it is close enough when staying on 127.0.0.1.
            req = await stream.receive_some(1024)
            match = re.match(
                (rb"^POST /foo HTTP/1.1\r\n"
                 rb"Accept-Encoding: identity\r\n"
                 rb"Content-Length: 0\r\n"
                 rb"Host: 127.0.0.1:([0-9]+)\r\n"
                 rb"User-Agent: [^\r]+\r\n"
                 rb"Connection: close\r\n"
                 rb"\r\n$"),
                req,
            )
            assert match
            t1 = match.group(1)
            assert t1 == str(target_port).encode()
            await stream.send_all(
                b"HTTP/1.1 200 OK\r\nContent-Size: 5\r\n\r\nhello")
            _event_hook("Connected to target")

        target_listeners = await nursery.start(
            partial(trio.serve_tcp,
                    _target_client_handler,
                    0,
                    host="127.0.0.1"))
        target_port = target_listeners[0].socket.getsockname()[1]

        async with real_clock_timeout():
            rep = await http_request(f"http://127.0.0.1:{target_port}/foo",
                                     method="POST")
            assert rep == b"hello"

        assert proxy_events == [
            *(["PAC file retreived from server"]
              if type == "no_proxy_from_pac" else []),
            "Connected to target",
        ]

        nursery.cancel_scope.cancel()
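The example above uses a handy trio idiom: starting `trio.serve_tcp` on port 0 via `nursery.start`, which blocks until the server is listening and hands back the listeners, so the kernel-assigned port can be read off the socket. A stripped-down sketch:

from functools import partial

import trio

async def main():
    async def handler(stream):
        await stream.send_all(b"hello")

    async with trio.open_nursery() as nursery:
        # nursery.start() waits for serve_tcp to call task_status.started(),
        # which it does with the list of bound listeners
        listeners = await nursery.start(
            partial(trio.serve_tcp, handler, 0, host="127.0.0.1"))
        port = listeners[0].socket.getsockname()[1]
        print(f"listening on 127.0.0.1:{port}")
        nursery.cancel_scope.cancel()

trio.run(main)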
Example #52
async def main():
    async with trio.open_nursery() as nursery:
        for i in range(3):
            nursery.start_soon(handle_request, i)
Example #53
async def fill_results(

    search: SearchBar,
    recv_chan: trio.abc.ReceiveChannel,

    # kb debouncing pauses (bracket defaults)
    min_pause_time: float = 0.01,  # absolute min typing throttle

    # max pause required before slow relay
    max_pause_time: float = 6/16 + 0.001,

) -> None:
    """Task to search through providers and fill in possible
    completion results.

    """
    global _search_active, _search_enabled, _searcher_cache

    bar = search.bar
    view = bar.view
    view.select_from_idx(QModelIndex())

    last_text = bar.text()
    repeats = 0

    # cache of prior patterns to search results
    matches = defaultdict(list)
    has_results: defaultdict[str, set[str]] = defaultdict(set)

    while True:
        await _search_active.wait()
        period = None
        pattern = None  # the receive below may time out before any input arrives

        while True:

            last_text = bar.text()
            wait_start = time.time()

            with trio.move_on_after(max_pause_time):
                pattern = await recv_chan.receive()

            period = time.time() - wait_start
            # print(f'{pattern} after {period}')

            # during fast multiple key inputs, wait until a pause
            # (in typing) to initiate search
            if period < min_pause_time:
                log.debug(f'Ignoring fast input for {pattern}')
                continue

            text = bar.text()
            # print(f'search: {text}')

            if not text or text.isspace():
                # print('idling')
                _search_active = trio.Event()
                break

            if text == last_text:
                repeats += 1

            if not _search_enabled:
                # print('search currently disabled')
                break

            already_has_results = has_results[text]
            log.debug(f'Search req for {text}')

            # issue multi-provider fan-out search request and place
            # "searching.." statuses on outstanding results providers
            async with trio.open_nursery() as n:

                for provider, (search, pause) in (
                    _searcher_cache.copy().items()
                ):
                    # XXX: only conduct search on this backend if it's
                    # registered for the corresponding pause period AND
                    # it hasn't already been searched with the current
                    # input pattern (in which case just look up the old
                    # results).
                    if (period >= pause) and (
                        provider not in already_has_results
                    ):

                        # TODO: it may make more sense TO NOT search the
                        # cache in a bg task since we know it's fully
                        # cpu-bound.
                        if provider != 'cache':
                            view.clear_section(
                                provider, status_field='-> searchin..')

                        await n.start(
                            pack_matches,
                            view,
                            has_results,
                            matches,
                            provider,
                            text,
                            search
                        )
                    else:  # already has results for this input text
                        results = matches[(provider, text)]

                        # TODO really for the cache we need an
                        # invalidation signal so that we only re-search
                        # the cache once it's been mutated by the chart
                        # switcher.. right now we're just always
                        # re-searching its ``dict`` since it's easier,
                        # but it also causes it to be slower than cached
                        # results from other providers on occasion.
                        if results and provider != 'cache':
                            view.set_section_entries(
                                section=provider,
                                values=results,
                            )
                        else:
                            view.clear_section(provider)

            if repeats > 2 and period > max_pause_time:
                _search_active = trio.Event()
                repeats = 0
                break

            bar.show()
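The debouncing loop above hinges on `trio.move_on_after` racing the next keystroke against a timer; note that `pattern` must be pre-initialized, since the timer can win before anything is received. A compact, self-contained version of the idea (all names here are illustrative):

import trio

async def main():
    send_chan, recv_chan = trio.open_memory_channel(0)

    async def typist():
        for text in ("a", "ab", "abc"):
            await send_chan.send(text)
            await trio.sleep(0.05)   # fast typing: below the pause threshold
        await trio.sleep(0.3)        # pause long enough for the timer to win
        await send_chan.aclose()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(typist)
        pattern = None               # the timer may fire before any input
        while True:
            with trio.move_on_after(0.2) as scope:
                try:
                    pattern = await recv_chan.receive()
                except trio.EndOfChannel:
                    break
            if scope.cancelled_caught:
                print(f"pause detected, searching for {pattern!r}")

trio.run(main)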
Example #54
async def child1():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(child2)
        nursery.start_soon(child2)
        nursery.start_soon(trio.sleep_forever)
Example #55
async def test_good_device_claim(backend, running_backend, alice, bob,
                                 alice_backend_cmds, user_fs_factory,
                                 with_labels):
    invitation = await backend.invite.new_for_device(
        organization_id=alice.organization_id, greeter_user_id=alice.user_id)
    invitation_addr = BackendInvitationAddr.build(
        backend_addr=alice.organization_addr,
        organization_id=alice.organization_id,
        invitation_type=InvitationType.DEVICE,
        token=invitation.token,
    )

    if with_labels:
        requested_device_label = "Foo's label"
        granted_device_label = "Bar's label"
    else:
        requested_device_label = None
        granted_device_label = None
    new_device = None

    # Simulate an out-of-band channel
    oob_send, oob_recv = trio.open_memory_channel(0)

    async def _run_claimer():
        async with backend_invited_cmds_factory(addr=invitation_addr) as cmds:
            initial_ctx = await claimer_retrieve_info(cmds)
            assert isinstance(initial_ctx, DeviceClaimInitialCtx)
            assert initial_ctx.greeter_user_id == alice.user_id
            assert initial_ctx.greeter_human_handle == alice.human_handle

            in_progress_ctx = await initial_ctx.do_wait_peer()

            choices = in_progress_ctx.generate_greeter_sas_choices(size=4)
            assert len(choices) == 4
            assert in_progress_ctx.greeter_sas in choices

            greeter_sas = await oob_recv.receive()
            assert greeter_sas == in_progress_ctx.greeter_sas

            in_progress_ctx = await in_progress_ctx.do_signify_trust()
            await oob_send.send(in_progress_ctx.claimer_sas)

            in_progress_ctx = await in_progress_ctx.do_wait_peer_trust()

            nonlocal new_device
            new_device = await in_progress_ctx.do_claim_device(
                requested_device_label=requested_device_label)
            assert isinstance(new_device, LocalDevice)

    async def _run_greeter():
        initial_ctx = DeviceGreetInitialCtx(cmds=alice_backend_cmds,
                                            token=invitation_addr.token)

        in_progress_ctx = await initial_ctx.do_wait_peer()

        await oob_send.send(in_progress_ctx.greeter_sas)

        in_progress_ctx = await in_progress_ctx.do_wait_peer_trust()

        choices = in_progress_ctx.generate_claimer_sas_choices(size=5)
        assert len(choices) == 5
        assert in_progress_ctx.claimer_sas in choices

        claimer_sas = await oob_recv.receive()
        assert claimer_sas == in_progress_ctx.claimer_sas

        in_progress_ctx = await in_progress_ctx.do_signify_trust()

        in_progress_ctx = await in_progress_ctx.do_get_claim_requests()

        assert in_progress_ctx.requested_device_label == requested_device_label

        await in_progress_ctx.do_create_new_device(
            author=alice, device_label=granted_device_label)

    with trio.fail_after(1):
        async with trio.open_nursery() as nursery:
            nursery.start_soon(_run_claimer)
            nursery.start_soon(_run_greeter)

    assert new_device is not None
    assert new_device.user_id == alice.user_id
    assert new_device.device_name != alice.device_name
    assert new_device.device_label == granted_device_label
    assert new_device.human_handle == alice.human_handle
    assert new_device.private_key == alice.private_key
    assert new_device.signing_key != alice.signing_key
    assert new_device.profile == alice.profile
    assert new_device.user_manifest_id == alice.user_manifest_id
    assert new_device.user_manifest_key == alice.user_manifest_key
    # Make sure greeter&claimer data are not mixed
    assert new_device.local_symkey != alice.local_symkey

    # Now invitation should have been deleted
    rep = await alice_backend_cmds.invite_list()
    assert rep == {"status": "ok", "invitations": []}

    # Verify user&device data in backend
    _, device = await backend.user.get_user_with_device(
        new_device.organization_id, new_device.device_id)
    assert device.device_label == granted_device_label
    if with_labels:
        assert device.device_certificate != device.redacted_device_certificate
    else:
        assert device.device_certificate == device.redacted_device_certificate

    # Test the behavior of this new device
    async with user_fs_factory(bob) as bobfs:
        async with user_fs_factory(alice) as alicefs:
            async with user_fs_factory(new_device,
                                       initialize_in_v0=True) as newfs:
                # Old device modifies the user manifest
                await alicefs.workspace_create("wa")
                await alicefs.sync()

                # New sharing from other user
                wb_id = await bobfs.workspace_create("wb")
                await bobfs.workspace_share(wb_id, alice.user_id,
                                            WorkspaceRole.CONTRIBUTOR)

                # Test that the new device gets access to both new workspaces
                await newfs.process_last_messages()
                await newfs.sync()
                newfs_um = newfs.get_user_manifest()

                # Make sure the new and old devices have the same view of the data
                await alicefs.sync()
                alicefs_um = alicefs.get_user_manifest()
                assert newfs_um == alicefs_um
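The `oob_send`/`oob_recv` pair above is a zero-capacity memory channel, which makes every `send` block until the peer is ready to `receive`; the claimer and greeter therefore advance in lock-step, much like two users comparing SAS codes out-of-band. The bare pattern, assuming nothing from the test suite:

import trio

async def main():
    # capacity 0: send() does not complete until a receiver is waiting
    send, recv = trio.open_memory_channel(0)

    async def claimer():
        code = await recv.receive()
        print(f"claimer checks code {code}")
        await send.send(code + 1)    # answer back on the same channel

    async def greeter():
        await send.send(1234)
        answer = await recv.receive()
        print(f"greeter got answer {answer}")

    async with trio.open_nursery() as nursery:
        nursery.start_soon(claimer)
        nursery.start_soon(greeter)

trio.run(main)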
Example #56
async def open_root_actor(

    # defaults are above
    arbiter_addr: Tuple[str, int] = (
        _default_arbiter_host,
        _default_arbiter_port,
    ),
    name: Optional[str] = 'root',

    # either the `multiprocessing` start method:
    # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
    # OR `trio` (the new default).
    start_method: Optional[str] = None,

    # enables the multi-process debugger support
    debug_mode: bool = False,

    # internal logging
    loglevel: Optional[str] = None,
    enable_modules: Optional[List] = None,
    rpc_module_paths: Optional[List] = None,
) -> typing.Any:
    """Async entry point for ``tractor``.

    """
    # Override the global debugger hook to make it play nice with
    # ``trio``, see:
    # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018
    os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace'

    # mark top most level process as root actor
    _state._runtime_vars['_is_root'] = True

    # caps based rpc list
    enable_modules = enable_modules or []

    if rpc_module_paths:
        warnings.warn(
            "`rpc_module_paths` is now deprecated, use "
            " `enable_modules` instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        enable_modules.extend(rpc_module_paths)

    if start_method is not None:
        _spawn.try_set_start_method(start_method)

    if debug_mode and _spawn._spawn_method == 'trio':
        _state._runtime_vars['_debug_mode'] = True

        # expose internal debug module to every actor allowing
        # for use of ``await tractor.breakpoint()``
        enable_modules.append('tractor._debug')

    elif debug_mode:
        raise RuntimeError(
            "Debug mode is only supported for the `trio` backend!")

    arbiter_addr = (host, port) = arbiter_addr or (_default_arbiter_host,
                                                   _default_arbiter_port)

    loglevel = loglevel or log.get_loglevel()
    if loglevel is not None:
        log._default_loglevel = loglevel
        log.get_console_log(loglevel)

    # make a temporary connection to see if an arbiter exists
    arbiter_found = False

    try:
        async with _connect_chan(host, port):
            arbiter_found = True

    except OSError:
        logger.warning(f"No actor could be found @ {host}:{port}")

    # create a local actor and start up its main routine/task
    if arbiter_found:

        # we were able to connect to an arbiter
        logger.info(f"Arbiter seems to exist @ {host}:{port}")

        actor = Actor(
            name or 'anonymous',
            arbiter_addr=arbiter_addr,
            loglevel=loglevel,
            enable_modules=enable_modules,
        )
        host, port = (host, 0)

    else:
        # start this local actor as the arbiter (aka a regular actor who
        # manages the local registry of "mailboxes")

        # Note that if the current actor is the arbiter, it is desirable
        # for it to stay up indefinitely until a re-election process has
        # taken place (which is not implemented yet, FYI).

        actor = Arbiter(
            name or 'arbiter',
            arbiter_addr=arbiter_addr,
            loglevel=loglevel,
            enable_modules=enable_modules,
        )

    try:
        # assign process-local actor
        _state._current_actor = actor

        # start local channel-server and fake the portal API
        # NOTE: this won't block since we provide the nursery
        logger.info(f"Starting local {actor} @ {host}:{port}")

        # start the actor runtime in a new task
        async with trio.open_nursery() as nursery:

            # ``Actor._async_main()`` creates an internal nursery and
            # thus blocks here until the entire underlying actor tree has
            # terminated thereby conducting structured concurrency.

            await nursery.start(
                partial(actor._async_main,
                        accept_addr=(host, port),
                        parent_addr=None))
            try:
                yield actor

            except (Exception, trio.MultiError) as err:
                logger.exception("Actor crashed:")
                await _debug._maybe_enter_pm(err)

                # always re-raise
                raise

            finally:
                logger.info("Shutting down root actor")
                with trio.CancelScope(shield=True):
                    await actor.cancel()
    finally:
        _state._current_actor = None
        logger.info("Root actor terminated")
Example #57
    async def activate(
        self,
        updateSniffer=False,
        updateOpenPorts=False,
        user="",
        task_status=trio.TASK_STATUS_IGNORED,
    ):
        """
        Start a thread that does the following
        - for each port in the config file
        - connects to the database
        - runs sniffer class

        Returns: 0 if no changes
                1 if only Sniffer changed
                2 if only sockets changed
                3 if both changed
        """
        # Gets the info from config file initially
        self.getConfigData()

        # Return code
        retCode = 0
        # Convenience reference
        replayPorts = self.responseData.keys()
        # Setup way to cancel these tasks
        with trio.CancelScope() as scope:
            # --- Start Async Sniffer ---#
            if self.sniffer is None:
                # TODO: Switch config="testing" to "base" when in production
                self.sniffer = Sniffer(
                    config="base",
                    openPorts=list(replayPorts),
                    whitelist=self.whitelist,
                    portWhitelist=self.portWhitelist,
                    honeypotIP=self.HONEY_IP,
                    managementIPs=self.MGMT_IPs,
                    port_scan_window=self.port_scan_window,
                    port_scan_sensitivity=self.port_scan_sensitivity,
                    databaser=self.db,
                )
                self.sniffer.start()
            elif updateSniffer:
                oldHash = self.sniffer.currentHash
                self.sniffer.configUpdate(
                    openPorts=list(replayPorts),
                    whitelist=self.whitelist,
                    portWhitelist=self.portWhitelist,
                    honeypotIP=self.HONEY_IP,
                    managementIPs=self.MGMT_IPs,
                    port_scan_window=self.port_scan_window,
                    port_scan_sensitivity=self.port_scan_sensitivity,
                )
                if not self.sniffer.currentHash == oldHash:
                    retCode = 1
            # Mark trio task as started (and pass cancel scope back to nursery)
            task_status.started(scope)

            # --- Open async UDP & TCP Sockets ---#
            udp_sockets = list(
                filter(lambda x: "UDP" in self.responseData[x].keys(), replayPorts)
            )
            tcp_sockets = list(
                filter(lambda x: "TCP" in self.responseData[x].keys(), replayPorts)
            )

            # Convience method to help with setting up TCP & UDP modules
            async def replay_server(listener_class, sockets, config_path, nursery):
                for port in sockets:
                    self.processList[port] = listener_class(
                        port,
                        self.responseData[port][config_path],
                        self.response_delay,
                        nursery,
                    )
                    nursery.start_soon(self.processList[port].handler)

            # --- Actually Start up listeners ---#
            try:
                async with trio.open_nursery() as nursery:
                    nursery.start_soon(
                        replay_server, UDPPortListener, udp_sockets, "UDP", nursery
                    )
                    nursery.start_soon(
                        replay_server, TCPPortListener, tcp_sockets, "TCP", nursery
                    )
            except Exception as ex:
                print("listener nursery exception: ", str(ex))
            finally:
                print("Listeners have been killed")

            # return the code here;
            # 0 means no changes,
            # 1 means only sniffer changed,
            # 2 means only TCP ports were changed,
            # 3 means both were changed
            if retCode == 1:
                self.db.alert(
                    Alert(
                        variant="admin",
                        message="Sniffer updated during runtime by " + user,
                    )
                )
            elif retCode == 2:
                self.db.alert(
                    Alert(
                        variant="admin",
                        message="TCP sockets updated during runtime by " + user,
                    )
                )
            elif retCode == 3:
                self.db.alert(
                    Alert(
                        variant="admin",
                        message="TCP sockets and Sniffer updated during runtime by "
                        + user,
                    )
                )
            elif retCode == 0:
                self.db.alert(
                    Alert(
                        variant="admin",
                        message="Attempted configuration change during runtime by "
                        + user,
                    )
                )
            return retCode
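A notable trick in `activate` is passing its own `CancelScope` back through `task_status.started(scope)`, letting the caller cancel just this service without tearing down the whole nursery. Sketched in isolation (names are illustrative):

import trio

async def cancellable_service(task_status=trio.TASK_STATUS_IGNORED):
    with trio.CancelScope() as scope:
        task_status.started(scope)   # hand our scope to whoever start()ed us
        await trio.sleep_forever()   # the actual service work
    print("service cancelled cleanly")

async def main():
    async with trio.open_nursery() as nursery:
        scope = await nursery.start(cancellable_service)
        await trio.sleep(0.1)
        scope.cancel()               # stops this service only, not the nursery

trio.run(main)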
Example #58
async def test_good_user_claim(backend, running_backend, alice,
                               alice_backend_cmds, user_fs_factory,
                               with_labels):
    claimer_email = "*****@*****.**"

    invitation = await backend.invite.new_for_user(
        organization_id=alice.organization_id,
        greeter_user_id=alice.user_id,
        claimer_email=claimer_email,
    )
    invitation_addr = BackendInvitationAddr.build(
        backend_addr=alice.organization_addr,
        organization_id=alice.organization_id,
        invitation_type=InvitationType.USER,
        token=invitation.token,
    )

    if with_labels:
        # Let's pretend we invited a Fortnite player...
        requested_human_handle = HumanHandle(email="*****@*****.**",
                                             label="xXx_Z4ck_xXx")
        requested_device_label = "Ultr4_B00st"
        granted_human_handle = HumanHandle(email="*****@*****.**",
                                           label="Zack")
        granted_device_label = "Desktop"
    else:
        requested_human_handle = None
        requested_device_label = None
        granted_human_handle = None
        granted_device_label = None
    granted_profile = UserProfile.STANDARD
    new_device = None

    # Simulate an out-of-band channel
    oob_send, oob_recv = trio.open_memory_channel(0)

    async def _run_claimer():
        async with backend_invited_cmds_factory(addr=invitation_addr) as cmds:
            initial_ctx = await claimer_retrieve_info(cmds)
            assert isinstance(initial_ctx, UserClaimInitialCtx)
            assert initial_ctx.claimer_email == claimer_email
            assert initial_ctx.greeter_user_id == alice.user_id
            assert initial_ctx.greeter_human_handle == alice.human_handle

            in_progress_ctx = await initial_ctx.do_wait_peer()

            choices = in_progress_ctx.generate_greeter_sas_choices(size=4)
            assert len(choices) == 4
            assert in_progress_ctx.greeter_sas in choices

            greeter_sas = await oob_recv.receive()
            assert greeter_sas == in_progress_ctx.greeter_sas

            in_progress_ctx = await in_progress_ctx.do_signify_trust()
            await oob_send.send(in_progress_ctx.claimer_sas)

            in_progress_ctx = await in_progress_ctx.do_wait_peer_trust()

            nonlocal new_device
            new_device = await in_progress_ctx.do_claim_user(
                requested_device_label=requested_device_label,
                requested_human_handle=requested_human_handle,
            )
            assert isinstance(new_device, LocalDevice)

    async def _run_greeter():
        initial_ctx = UserGreetInitialCtx(cmds=alice_backend_cmds,
                                          token=invitation_addr.token)

        in_progress_ctx = await initial_ctx.do_wait_peer()

        await oob_send.send(in_progress_ctx.greeter_sas)

        in_progress_ctx = await in_progress_ctx.do_wait_peer_trust()

        choices = in_progress_ctx.generate_claimer_sas_choices(size=5)
        assert len(choices) == 5
        assert in_progress_ctx.claimer_sas in choices

        claimer_sas = await oob_recv.receive()
        assert claimer_sas == in_progress_ctx.claimer_sas

        in_progress_ctx = await in_progress_ctx.do_signify_trust()

        in_progress_ctx = await in_progress_ctx.do_get_claim_requests()

        assert in_progress_ctx.requested_device_label == requested_device_label
        assert in_progress_ctx.requested_human_handle == requested_human_handle

        await in_progress_ctx.do_create_new_user(
            author=alice,
            device_label=granted_device_label,
            human_handle=granted_human_handle,
            profile=granted_profile,
        )

    with trio.fail_after(1):
        async with trio.open_nursery() as nursery:
            nursery.start_soon(_run_claimer)
            nursery.start_soon(_run_greeter)

    assert new_device is not None
    assert new_device.device_id != alice.device_id
    assert new_device.device_label == granted_device_label
    # Label is normally ignored when comparing HumanHandle
    if with_labels:
        assert new_device.human_handle.label == granted_human_handle.label
        assert new_device.human_handle.email == granted_human_handle.email
    else:
        assert new_device.human_handle is None
    assert new_device.profile == granted_profile
    # Extra check to make sure claimer&greeter data are not mixed
    assert new_device.user_manifest_id != alice.user_manifest_id
    assert new_device.user_manifest_key != alice.user_manifest_key
    assert new_device.local_symkey != alice.local_symkey

    # Now invitation should have been deleted
    rep = await alice_backend_cmds.invite_list()
    assert rep == {"status": "ok", "invitations": []}

    # Verify user&device data in backend
    user, device = await backend.user.get_user_with_device(
        new_device.organization_id, new_device.device_id)
    assert user.profile == granted_profile
    assert user.human_handle == granted_human_handle
    assert device.device_label == granted_device_label
    if with_labels:
        assert user.user_certificate != user.redacted_user_certificate
        assert device.device_certificate != device.redacted_device_certificate
    else:
        assert user.user_certificate == user.redacted_user_certificate
        assert device.device_certificate == device.redacted_device_certificate

    # Test the behavior of this new user device
    async with user_fs_factory(alice) as alicefs:
        async with user_fs_factory(new_device, initialize_in_v0=True) as newfs:
            # Share a workspace with new user
            aw_id = await alicefs.workspace_create("alice_workspace")
            await alicefs.workspace_share(aw_id, new_device.user_id,
                                          WorkspaceRole.CONTRIBUTOR)

            # New user creates a workspace of their own and shares it back
            zw_id = await newfs.workspace_create("zack_workspace")
            await newfs.workspace_share(zw_id, alice.user_id,
                                        WorkspaceRole.READER)

            # Now both users should have the same workspaces
            await alicefs.process_last_messages()
            await newfs.process_last_messages()
            await newfs.sync()  # Not required, but just to make sure it works

            alice_um = alicefs.get_user_manifest()
            zack_um = newfs.get_user_manifest()

            assert {(w.id, w.key)
                    for w in alice_um.workspaces
                    } == {(w.id, w.key)
                          for w in zack_um.workspaces}
Example #59
async def asyncmain():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(trio.serve_tcp, handle_tcp_client, PORT)
        nursery.start_soon(watch_usb, nursery)
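Note that `watch_usb` receives the nursery itself as an argument, a common trio pattern that lets a child task spawn new siblings (e.g., one handler per hot-plugged device) into the same supervision scope. A hedged sketch of the idea, since `watch_usb`'s real body is not shown here:

import trio

async def device_session(n):
    print(f"device {n} attached")
    await trio.sleep(0.1)

async def watch_usb(nursery):
    # a child task can start new siblings in the nursery it was given
    for n in range(3):               # stand-in for hot-plug events
        nursery.start_soon(device_session, n)
        await trio.sleep(0.05)
    nursery.cancel_scope.cancel()    # shut everything down for the demo

async def main():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(watch_usb, nursery)

trio.run(main)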
Example #60
async def _async_main(
    name: str,
    portal: tractor._portal.Portal,
    symbols: List[str],
    brokermod: ModuleType,
    loglevel: str = 'info',
    rate: int = 3,
    test: str = '',
) -> None:
    '''Launch kivy app + all other related tasks.

    This is started with cli cmd `piker monitor`.
    '''
    feed = DataFeed(portal, brokermod)
    quote_gen, quotes = await feed.open_stream(
        symbols,
        'stock',
        rate=rate,
        test=test,
    )

    first_quotes, _ = feed.format_quotes(quotes)

    if first_quotes[0].get('last') is None:
        log.error("Broker API is down temporarily")
        return

    # build out UI
    Window.set_title(f"monitor: {name}\t(press ? for help)")
    Builder.load_string(_kv)
    box = BoxLayout(orientation='vertical', spacing=0)

    # define bid-ask "stacked" cells
    # (TODO: needs some rethinking and renaming for sure)
    bidasks = brokermod._stock_bidasks

    # add header row
    headers = first_quotes[0].keys()
    header = Row(
        {key: key
         for key in headers},
        headers=headers,
        bidasks=bidasks,
        is_header=True,
        size_hint=(1, None),
    )
    box.add_widget(header)

    # build table
    table = TickerTable(
        cols=1,
        size_hint=(1, None),
    )
    for ticker_record in first_quotes:
        table.append_row(
            ticker_record['symbol'],
            Row(ticker_record,
                headers=('symbol', ),
                bidasks=bidasks,
                table=table))
    table.last_clicked_row = next(iter(table.symbols2rows.values()))

    # associate the col headers row with the ticker table even though
    # they're technically wrapped separately in a containing BoxLayout
    header.table = table

    # mark the initial sorted column header as bold and underlined
    sort_cell = header.get_cell(table.sort_key)
    sort_cell.bold = sort_cell.underline = True
    table.last_clicked_col_cell = sort_cell

    # set up a pager view for large ticker lists
    table.bind(minimum_height=table.setter('height'))

    ss = tractor.current_actor().statespace

    async def spawn_opts_chain():
        """Spawn an options chain UI in a new subactor.
        """
        from .option_chain import _async_main

        try:
            async with tractor.open_nursery() as tn:
                portal = await tn.run_in_actor(
                    'optschain',
                    _async_main,
                    symbol=table.last_clicked_row._last_record['symbol'],
                    brokername=brokermod.name,
                    loglevel=tractor.log.get_loglevel(),
                )
        except tractor.RemoteActorError:
            # don't allow option chain errors to crash this monitor
            # this is, like, the most basic of resliency policies
            log.exception(f"{portal.actor.name} crashed:")

    async with trio.open_nursery() as nursery:
        pager = PagerView(
            container=box,
            contained=table,
            nursery=nursery,
            # spawn an option chain on 'o' keybinding
            kbctls={('o', ): spawn_opts_chain},
        )
        box.add_widget(pager)

        widgets = {
            'root': box,
            'table': table,
            'box': box,
            'header': header,
            'pager': pager,
        }
        ss['widgets'] = widgets
        nursery.start_soon(update_quotes, nursery,
                           brokermod.format_stock_quote, widgets, quote_gen,
                           feed._symbol_data_cache, quotes)
        try:
            await async_runTouchApp(widgets['root'])
        finally:
            # cancel remote data feed task
            await quote_gen.aclose()
            # cancel GUI update task
            nursery.cancel_scope.cancel()
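The closing `try/finally` above is the standard way to pair a foreground loop with a background updater in trio: the worker never returns on its own, so the nursery body must cancel it explicitly, or the `async with` would wait forever. The shape, reduced to its essentials:

import trio

async def background_updates():
    while True:
        await trio.sleep(0.1)        # stand-in for the quote-update loop

async def run_foreground():
    await trio.sleep(0.5)            # stand-in for the GUI main loop

async def main():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(background_updates)
        try:
            await run_foreground()
        finally:
            # the updater never exits by itself; without this cancel the
            # nursery would block forever waiting for it
            nursery.cancel_scope.cancel()

trio.run(main)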