Example #1
async def test_delete_invitation_then_claimer_action_before_backend_closes_connection(
        exchange_testbed, backend):
    tb = exchange_testbed

    # Disable the callback responsible for closing the claimer's connection
    # on invitation deletion. This way we can test connection behavior
    # when the automatic closing takes time to be processed.
    backend.event_bus.mute(BackendEvent.INVITE_STATUS_CHANGED)

    await backend.invite.delete(
        organization_id=tb.organization_id,
        greeter=tb.greeter.user_id,
        token=tb.invitation.token,
        on=datetime(2000, 1, 2),
        reason=InvitationDeletedReason.ROTTEN,
    )

    # No need to be in the correct exchange state here, given that checking
    # the invitation status should be the very first thing done
    for action in [
            # `invite_info` is not listed here: it uses a cache populated
            # during the connection handshake, so it would fail this test.
            # This is fine given that not touching the db precisely makes it
            # a read-only operation.
            "1_wait_peer",
            "2a_send_hashed_nonce",
            "2b_send_nonce",
            "3a_signify_trust",
            "3b_wait_peer_trust",
            "4_communicate",
    ]:
        with pytest.raises(TransportError):
            async with real_clock_timeout():
                await tb.send_order("claimer", action)
                await tb.get_result("claimer")
Example #2
        async def step_6_validate_claim_info(self):
            assert self.claimer_claim_task
            u_w = self.users_widget
            gu_w = self.greet_user_widget
            guci_w = self.greet_user_check_informations_widget

            # Finally confirm the claimer info and finish the greeting!
            aqtbot.mouse_click(guci_w.button_create_user, QtCore.Qt.LeftButton)

            async with real_clock_timeout():
                await self.claimer_claim_task.join()

            def _greet_done():
                assert not gu_w.isVisible()
                assert autoclose_dialog.dialogs == [
                    ("",
                     "The user was successfully greeted in your organization.")
                ]
                # User list should be updated
                assert u_w.layout_users.count() == 4
                user_widget = u_w.layout_users.itemAt(3).widget()
                assert isinstance(user_widget, UserButton)
                assert user_widget.user_info.human_handle.email == self.granted_email
                assert user_widget.user_info.human_handle.label == self.granted_label

            await aqtbot.wait_until(_greet_done)

            return None  # Test is done \o/
Example #3
    async def _delete_invitation_and_assert_claimer_left(
            retrieve_previous_result):
        # Delete the invitation, claimer connection should be closed automatically
        with backend.event_bus.listen() as spy:
            await backend.invite.delete(
                organization_id=tb.organization_id,
                greeter=tb.greeter.user_id,
                token=tb.invitation.token,
                on=datetime(2000, 1, 2),
                reason=InvitationDeletedReason.ROTTEN,
            )
            await spy.wait_with_timeout(BackendEvent.INVITE_STATUS_CHANGED)

        with pytest.raises(TransportError):
            async with real_clock_timeout():
                if retrieve_previous_result:
                    await tb.get_result("claimer")
                # Even if we had to retrieve an existing result, it could have
                # been returned by the backend before the invitation deletion
                # occurred, hence we must poll with additional requests no
                # matter what. On top of that, the claimer connection can take
                # some time to be closed, so we need a polling loop here.
                while True:
                    await tb.send_order("claimer", "invite_info")
                    rep = await tb.get_result("claimer")
                    # Invitation info is cached for the connection at handshake
                    # time, hence the command won't take into account the fact
                    # that the invitation has been deleted
                    assert rep["status"] == "ok"
Example #4
    async def wait_until(self, callback):
        """Implementation shamelessly adapted from:
        https://github.com/pytest-dev/pytest-qt/blob/16b989d700dfb91fe389999d8e2676437169ed44/src/pytestqt/qtbot.py#L459
        """
        __tracebackhide__ = True
        last_exc = None
        try:
            async with real_clock_timeout():
                while True:
                    try:
                        result = callback()
                    except AssertionError as exc:
                        last_exc = exc
                        result = False

                    if result not in (None, True, False):
                        msg = f"waitUntil() callback must return None, True or False, returned {result!r}"
                        raise ValueError(msg)
                    if result in (True, None):
                        return
                    await trio.sleep(0.01)
        except trio.TooSlowError:
            if last_exc:
                raise trio.TooSlowError() from last_exc
            else:
                raise
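Every example in this section leans on the `real_clock_timeout` helper, whose implementation is not shown here. Below is a minimal sketch of what such a helper could look like, assuming only what the examples reveal: it is an async context manager, it follows the real clock even when the trio clock is mocked by test fixtures, and per example #4 it raises `trio.TooSlowError` on expiry. The `timeout` parameter name and its default are assumptions, not the project's actual code.

from contextlib import asynccontextmanager
import time

import trio


@asynccontextmanager
async def real_clock_timeout(timeout: float = 10.0):
    # Sketch only. The watchdog sleeps in a worker thread, so it follows the
    # real (wall) clock even when the trio clock is mocked in tests.
    with trio.CancelScope() as scope:
        async with trio.open_nursery() as nursery:

            async def _watchdog() -> None:
                # cancellable=True lets us abandon the sleeping thread
                # (older-trio spelling; newer trio calls it abandon_on_cancel)
                await trio.to_thread.run_sync(time.sleep, timeout, cancellable=True)
                scope.cancel()

            nursery.start_soon(_watchdog)
            yield
            # Body finished in time: abandon the watchdog thread
            nursery.cancel_scope.cancel()
    if scope.cancelled_caught:
        raise trio.TooSlowError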
Example #5
        async def step_5_provide_claim_info(self):
            gdce_w = self.greet_device_code_exchange_widget

            async def _claimer_claim(in_progress_ctx,
                                     task_status=trio.TASK_STATUS_IGNORED):
                task_status.started()
                await in_progress_ctx.do_claim_device(
                    requested_device_label=self.requested_device_label)

            self.claimer_claim_task = await start_task(
                self.nursery, _claimer_claim, self.claimer_in_progress_ctx)
            async with real_clock_timeout():
                await self.claimer_claim_task.join()

            def _greet_done():
                assert not gdce_w.isVisible()
                assert autoclose_dialog.dialogs == [
                    ("", "The device was successfully created.")
                ]
                assert self.devices_widget.layout_devices.count() == 3
                # Devices are not sorted in Rust (they come in insertion order)
                device_button = next(
                    (item.widget()
                     for item in self.devices_widget.layout_devices.items
                     if item.widget().label_device_name.text() == "PC1"),
                    None,
                )
                assert isinstance(device_button, DeviceButton)
                assert device_button.device_info.device_label == self.requested_device_label

            await aqtbot.wait_until(_greet_done)
            return
Example #6
async def test_init_online_backend_late_reply(server_factory, core_config,
                                              alice, event_bus, backend):
    can_serve_client = trio.Event()

    async def _handle_client(stream):
        await can_serve_client.wait()
        return await backend.handle_client(stream)

    async with server_factory(_handle_client) as server:
        alice = server.correct_addr(alice)
        async with real_clock_timeout():
            async with logged_core_factory(config=core_config,
                                           device=alice,
                                           event_bus=event_bus) as core:
                # No need to wait for the backend reply to finish core init
                with core.event_bus.listen() as spy:
                    can_serve_client.set()
                    # Now backend reply, monitor should send events accordingly
                    await spy.wait(
                        CoreEvent.BACKEND_CONNECTION_CHANGED,
                        kwargs={
                            "status": BackendConnStatus.READY,
                            "status_exc": None
                        },
                    )
Example #7
async def test_greeter_exchange_bad_access(alice, backend, alice_backend_sock, reason):
    if reason == "deleted_invitation":
        invitation = await backend.invite.new_for_device(
            organization_id=alice.organization_id, greeter_user_id=alice.user_id
        )
        await backend.invite.delete(
            organization_id=alice.organization_id,
            greeter=alice.user_id,
            token=invitation.token,
            on=datetime(2000, 1, 2),
            reason=InvitationDeletedReason.ROTTEN,
        )
        token = invitation.token
        status = "already_deleted"
    else:
        assert reason == "unknown_token"
        token = InvitationToken.new()
        status = "not_found"

    greeter_privkey = PrivateKey.generate()
    for command, params in [
        (
            invite_1_greeter_wait_peer,
            {"token": token, "greeter_public_key": greeter_privkey.public_key},
        ),
        (invite_2a_greeter_get_hashed_nonce, {"token": token}),
        (invite_2b_greeter_send_nonce, {"token": token, "greeter_nonce": b"<greeter_nonce>"}),
        (invite_3a_greeter_wait_peer_trust, {"token": token}),
        (invite_3b_greeter_signify_trust, {"token": token}),
        (invite_4_greeter_communicate, {"token": token, "payload": b"<payload>"}),
    ]:
        async with real_clock_timeout():
            rep = await command(alice_backend_sock, **params)
        assert rep == {"status": status}
Example #8
async def test_concurrency_sends(running_backend, alice, event_bus):
    CONCURRENCY = 10
    work_done_counter = 0
    work_all_done = trio.Event()

    async def sender(cmds, x):
        nonlocal work_done_counter
        rep = await cmds.ping(x)
        assert rep == {"status": "ok", "pong": str(x)}
        work_done_counter += 1
        if work_done_counter == CONCURRENCY:
            work_all_done.set()

    conn = BackendAuthenticatedConn(
        alice.organization_addr,
        alice.device_id,
        alice.signing_key,
        event_bus,
        max_pool=CONCURRENCY // 2,
    )
    async with conn.run():

        async with trio.open_service_nursery() as nursery:
            for x in range(CONCURRENCY):
                nursery.start_soon(sender, conn.cmds, str(x))

        async with real_clock_timeout():
            await work_all_done.wait()
Example #9
async def test_unmount_with_fusermount(base_mountpoint, alice, alice_user_fs, event_bus):
    wid = await alice_user_fs.workspace_create(EntryName("w"))
    workspace = alice_user_fs.get_workspace(wid)
    await workspace.touch("/bar.txt")

    async with mountpoint_manager_factory(
        alice_user_fs, event_bus, base_mountpoint
    ) as mountpoint_manager:

        with event_bus.listen() as spy:
            mountpoint_path = await mountpoint_manager.mount_workspace(wid)
            command = f"fusermount -u {mountpoint_path}".split()
            expected = {"mountpoint": mountpoint_path, "workspace_id": wid, "timestamp": None}

            completed_process = await trio.run_process(command)
            async with real_clock_timeout():
                # fusermount might fail for some reason, so retry until it succeeds
                while completed_process.returncode:
                    completed_process = await trio.run_process(command)
                await spy.wait(CoreEvent.MOUNTPOINT_STOPPED, expected)

        assert not await trio.Path(mountpoint_path / "bar.txt").exists()

    # Mountpoint path should be removed on unmounting
    assert not await trio.Path(mountpoint_path).exists()
Example #10
async def wait_exposed(self, widget):
    __tracebackhide__ = True
    yield
    async with real_clock_timeout():
        while True:
            if QtTest.QTest.qWaitForWindowExposed(widget, 10):
                return
            await trio.sleep(0.010)
Example #11
async def wait_for_listeners(conn, to_terminate=False):
    async with real_clock_timeout():
        while True:
            rows = await conn.fetch(
                "SELECT pid FROM pg_stat_activity WHERE query ILIKE 'listen %' AND state ILIKE 'idle'"
            )
            if (not to_terminate and rows) or (to_terminate and not rows):
                return [r["pid"] for r in rows]
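For context, here is a usage sketch of `wait_for_listeners` combining its two polling modes, mirroring how examples #20 and #22 below use it; the `postgresql_url` value and the `triopg` connection are assumed from those examples.

# Wait until the backend's LISTEN connection shows up, terminate it,
# then wait until it is gone.
async with triopg.connect(postgresql_url) as conn:
    pid, = await wait_for_listeners(conn)
    value, = await conn.fetchrow("SELECT pg_terminate_backend($1)", pid)
    assert value
    await wait_for_listeners(conn, to_terminate=True)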
Example #12
async def test_user_claim_but_active_users_limit_reached(
        backend, running_backend, alice):
    # Organization has reached active user limit
    await backend.organization.update(alice.organization_id,
                                      active_users_limit=1)

    # Invitation is still ok...
    invitation = await backend.invite.new_for_user(
        organization_id=alice.organization_id,
        greeter_user_id=alice.user_id,
        claimer_email="*****@*****.**",
    )
    invitation_addr = BackendInvitationAddr.build(
        backend_addr=alice.organization_addr.get_backend_addr(),
        organization_id=alice.organization_id,
        invitation_type=InvitationType.USER,
        token=invitation.token,
    )

    async def _run_greeter():
        async with backend_authenticated_cmds_factory(
                alice.organization_addr, alice.device_id,
                alice.signing_key) as alice_backend_cmds:
            initial_ctx = UserGreetInitialCtx(cmds=alice_backend_cmds,
                                              token=invitation_addr.token)
            in_progress_ctx = await initial_ctx.do_wait_peer()
            in_progress_ctx = await in_progress_ctx.do_wait_peer_trust()
            in_progress_ctx = await in_progress_ctx.do_signify_trust()
            in_progress_ctx = await in_progress_ctx.do_get_claim_requests()

            # ...this is where the limit should be enforced
            with pytest.raises(InviteActiveUsersLimitReachedError):
                await in_progress_ctx.do_create_new_user(
                    author=alice,
                    device_label=in_progress_ctx.requested_device_label,
                    human_handle=in_progress_ctx.requested_human_handle,
                    profile=UserProfile.STANDARD,
                )

    async def _run_claimer():
        async with backend_invited_cmds_factory(addr=invitation_addr) as cmds:
            initial_ctx = await claimer_retrieve_info(cmds)
            in_progress_ctx = await initial_ctx.do_wait_peer()
            in_progress_ctx = await in_progress_ctx.do_signify_trust()
            in_progress_ctx = await in_progress_ctx.do_wait_peer_trust()

            await in_progress_ctx.do_claim_user(requested_device_label=None,
                                                requested_human_handle=None)

    async with real_clock_timeout():
        async with trio.open_nursery() as nursery:
            nursery.start_soon(_run_claimer)
            await _run_greeter()
            # Claimer is not notified that the greeter has failed so we
            # must explicitly cancel it
            nursery.cancel_scope.cancel()
Example #13
    async def async_call(self, sock, *args, **kwargs):
        check_rep = kwargs.pop("check_rep", self.check_rep_by_default)
        await self._do_send(sock, args, kwargs)

        box = self.AsyncCallRepBox(
            do_recv=partial(self._do_recv, sock, check_rep))
        yield box

        if not box.rep_done:
            async with real_clock_timeout():
                await box.do_recv()
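Example #13 is a generator-style context manager body (presumably decorated with `@asynccontextmanager` or an equivalent in the surrounding class). A hedged usage sketch follows, where `caller`, `sock`, the "ping" command and `do_other_work` are purely illustrative names:

# Hypothetical usage: the request is sent on entry; other work can run
# while it is in flight. If the reply was not consumed inside the block,
# it is awaited on exit under a real-clock timeout.
async with caller.async_call(sock, "ping") as box:
    await do_other_work()  # hypothetical concurrent work
# At this point box.do_recv() has run unless box.rep_done was already True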
Example #14
async def assert_stream_closed_on_peer_side(stream):
    # Peer should send EOF and close connection
    async with real_clock_timeout():
        rep = await stream.receive_some()
        assert rep == b""
        # From now on, trying to send new request should fail
        with pytest.raises(trio.BrokenResourceError):
            # Peer side closing of the TCP socket may take time, so we may
            # still be able to send for a short while
            while True:
                await stream.send_all(b"GET / HTTP/1.0\r\n\r\n")
Example #15
async def test_proxy_with_websocket(monkeypatch, connection_type, proxy_type):
    signing_key = SigningKey.generate()
    device_id = DeviceID("zack@pc1")
    proxy_events = []

    def _event_hook(event):
        proxy_events.append(event)

    async with trio.open_nursery() as nursery:
        target_port = await start_port_watchdog(nursery, _event_hook)
        proxy_port = await start_proxy_for_websocket(nursery, target_port,
                                                     _event_hook)

        if proxy_type == "http_proxy":
            proxy_url = f"http://127.0.0.1:{proxy_port}"
            monkeypatch.setitem(os.environ, "http_proxy", proxy_url)
        else:
            assert proxy_type == "http_proxy_pac"
            pac_server_port = await start_pac_server(
                nursery=nursery,
                pac_rule=f"PROXY 127.0.0.1:{proxy_port}",
                event_hook=_event_hook)
            pac_server_url = f"http://127.0.0.1:{pac_server_port}"
            monkeypatch.setitem(os.environ, "http_proxy_pac", pac_server_url)
            # HTTP_PROXY_PAC has priority over HTTP_PROXY
            monkeypatch.setitem(os.environ, "http_proxy",
                                f"http://127.0.0.1:{target_port}")

        async with real_clock_timeout():
            with pytest.raises(BackendNotAvailable):
                if connection_type == "authenticated":
                    await connect_as_authenticated(
                        addr=BackendOrganizationAddr.from_url(
                            f"parsec://127.0.0.1:{target_port}/CoolOrg?no_ssl=true&rvk=7NFDS4VQLP3XPCMTSEN34ZOXKGGIMTY2W2JI2SPIHB2P3M6K4YWAssss"
                        ),
                        device_id=device_id,
                        signing_key=signing_key,
                    )

                else:
                    assert connection_type == "invited"
                    await connect_as_invited(addr=BackendInvitationAddr.from_url(
                        f"parsec://127.0.0.1:{target_port}/CoolOrg?no_ssl=true&action=claim_user&token=3a50b191122b480ebb113b10216ef343"
                    ))

        assert proxy_events == [
            *(["PAC file retreived from server"]
              if proxy_type == "http_proxy_pac" else []),
            "Connected to proxy",
            "Reaching target through proxy",
        ]

        nursery.cancel_scope.cancel()
Example #16
async def wait_signals(self, signals):
    __tracebackhide__ = True
    try:
        async with real_clock_timeout():
            async with AsyncExitStack() as stack:
                for signal in signals:
                    await stack.enter_async_context(
                        qtrio._core.wait_signal_context(signal))
                yield
    # Suppress the context in order to simplify the tracebacks in pytest
    except trio.TooSlowError:
        raise trio.TooSlowError from None
Example #17
async def test_invalid_request_line(backend_http_send, running_backend):
    for req in [
        b"\x00",  # Early check should detect this has no chance of being an HTTP request
        b"\r\n\r\n",  # Missing everything :/
        b"HTTP/1.0\r\n\r\n",  # Missing method and target
        "GET /开始 HTTP/1.0\r\n\r\n".encode("utf8"),  # UTF-8 is not ISO-8859-1 !
        b"GET /\xf1 HTTP/1.0\r\n\r\n",  # Target part must be ISO-8859-1
        b"G\xf1T / HTTP/1.0\r\n\r\n",  # Method must be ISO-8859-1
        b"GET / HTTP/42.0\r\n\r\n",  # Only supported in Cyberpunk 2077
    ]:
        async with real_clock_timeout():
            status, _, _ = await backend_http_send(req=req)
            assert status == (400, "Bad Request")
Example #18
async def test_reencryption_events(
    backend, alice_backend_sock, alice2_backend_sock, realm, alice, vlobs, vlob_atoms
):

    # Start listening events
    await events_subscribe(alice_backend_sock)

    with backend.event_bus.listen() as spy:
        # Start maintenance and check for events
        await realm_start_reencryption_maintenance(
            alice2_backend_sock, realm, 2, pendulum_now(), {alice.user_id: b"foo"}
        )

        async with real_clock_timeout():
            # No guarantee those events occur before the commands return
            await spy.wait_multiple(
                [BackendEvent.REALM_MAINTENANCE_STARTED, BackendEvent.MESSAGE_RECEIVED]
            )

        rep = await events_listen_nowait(alice_backend_sock)
        assert rep == {
            "status": "ok",
            "event": APIEvent.REALM_MAINTENANCE_STARTED,
            "realm_id": realm,
            "encryption_revision": 2,
        }
        rep = await events_listen_nowait(alice_backend_sock)
        assert rep == {"status": "ok", "event": APIEvent.MESSAGE_RECEIVED, "index": 1}

        # Do the reencryption
        rep = await vlob_maintenance_get_reencryption_batch(alice_backend_sock, realm, 2, size=100)
        await vlob_maintenance_save_reencryption_batch(alice_backend_sock, realm, 2, rep["batch"])

        # Finish maintenance and check for events
        await realm_finish_reencryption_maintenance(alice2_backend_sock, realm, 2)

        # No guarantee this event occurs before the command returns
        await spy.wait_with_timeout(BackendEvent.REALM_MAINTENANCE_FINISHED)

        rep = await events_listen_nowait(alice_backend_sock)
        assert rep == {
            "status": "ok",
            "event": APIEvent.REALM_MAINTENANCE_FINISHED,
            "realm_id": realm,
            "encryption_revision": 2,
        }

    # Sanity check
    rep = await events_listen_nowait(alice_backend_sock)
    assert rep == {"status": "no_events"}
Example #19
            async def _do_close_client():
                if clean_close:
                    await client_stream.aclose()
                else:
                    # Reset the TCP socket instead of a regular clean close
                    # See https://stackoverflow.com/a/54065411
                    l_onoff = 1
                    l_linger = 0
                    client_stream.setsockopt(
                        socket.SOL_SOCKET, socket.SO_LINGER,
                        struct.pack("ii", l_onoff, l_linger))
                    client_stream.socket.close()

                async with real_clock_timeout():
                    await outcome_available.wait()
Example #20
async def test_postgresql_notification_listener_terminated(
        postgresql_url, backend_factory):

    async with triopg.connect(postgresql_url) as conn:

        with pytest.raises(ConnectionError):

            async with backend_factory(config={"db_url": postgresql_url}):
                pid, = await wait_for_listeners(conn)
                value, = await conn.fetchrow("SELECT pg_terminate_backend($1)",
                                             pid)
                assert value
                # Wait to get cancelled by the backend app
                async with real_clock_timeout():
                    await trio.sleep_forever()
Example #21
async def test_delete_invitation(
    alice, backend, alice_backend_sock, alice2_backend_sock, backend_invited_sock_factory
):
    with backend.event_bus.listen() as spy:
        invitation = await backend.invite.new_for_device(
            organization_id=alice.organization_id,
            greeter_user_id=alice.user_id,
            created_on=datetime(2000, 1, 2),
        )
        await spy.wait_multiple_with_timeout([BackendEvent.INVITE_STATUS_CHANGED])

    await events_subscribe(alice2_backend_sock)

    with backend.event_bus.listen() as spy:
        with freeze_time("2000-01-03"):
            rep = await invite_delete(
                alice_backend_sock, token=invitation.token, reason=InvitationDeletedReason.CANCELLED
            )
        assert rep == {"status": "ok"}
        await spy.wait_with_timeout(BackendEvent.INVITE_STATUS_CHANGED)

    async with real_clock_timeout():
        rep = await events_listen_wait(alice2_backend_sock)
    assert rep == {
        "status": "ok",
        "event": APIEvent.INVITE_STATUS_CHANGED,
        "invitation_status": InvitationStatus.DELETED,
        "token": invitation.token,
    }

    # Deleted invitations are no longer visible
    rep = await invite_list(alice_backend_sock)
    assert rep == {"status": "ok", "invitations": []}

    # Can no longer use this invitation to connect to the backend
    with pytest.raises(HandshakeBadIdentity):
        async with backend_invited_sock_factory(
            backend,
            organization_id=alice.organization_id,
            invitation_type=InvitationType.DEVICE,
            token=invitation.token,
        ):
            pass
Example #22
async def test_retry_policy_no_retry(postgresql_url, unused_tcp_port,
                                     asyncio_loop):
    host = "127.0.0.1"
    port = unused_tcp_port
    app_config = BackendConfig(
        administration_token="s3cr3t",
        db_min_connections=1,
        db_max_connections=5,
        debug=False,
        blockstore_config=PostgreSQLBlockStoreConfig(),
        email_config=None,
        backend_addr=None,
        forward_proto_enforce_https=None,
        ssl_context=False,
        organization_spontaneous_bootstrap=False,
        organization_bootstrap_webhook_url=None,
        db_url=postgresql_url,
    )

    # No retry
    retry_policy = RetryPolicy(maximum_attempts=0, pause_before_retry=0)

    # Expect a connection error
    with pytest.raises(ConnectionError):
        async with trio.open_nursery() as nursery:
            # Run backend in the background
            nursery.start_soon(lambda: _run_backend(host,
                                                    port,
                                                    ssl_context=False,
                                                    retry_policy=retry_policy,
                                                    app_config=app_config))
            # Connect to PostgreSQL database
            async with triopg.connect(postgresql_url) as conn:
                # Wait for the backend to be connected
                pid, = await wait_for_listeners(conn)
                # Terminate the backend listener connection
                value, = await conn.fetchrow("SELECT pg_terminate_backend($1)",
                                             pid)
                assert value
                # Wait to get cancelled by the connection error in `_run_backend`
                async with real_clock_timeout():
                    await trio.sleep_forever()
Example #23
async def test_ipc_server(tmpdir, monkeypatch):
    # The dummy directory should be automatically created when the server starts
    file1 = Path(tmpdir / "dummy" / "1.lock")
    mut1 = uuid4().hex

    async def _cmd_handler(cmd):
        assert cmd == {"cmd": IPCCommand.FOREGROUND}
        return {"status": "ok"}

    async with real_clock_timeout():
        async with run_ipc_server(_cmd_handler,
                                  socket_file=file1,
                                  win32_mutex_name=mut1):

            with pytest.raises(IPCServerAlreadyRunning):
                async with run_ipc_server(_cmd_handler,
                                          socket_file=file1,
                                          win32_mutex_name=mut1):
                    pass

            # Send good command
            ret = await send_to_ipc_server(file1, IPCCommand.FOREGROUND)
            assert ret == {"status": "ok"}

            # Send bad command, it should be caught before even trying to reach the server
            with pytest.raises(IPCServerError) as exc:
                ret = await send_to_ipc_server(file1, "dummy")
            assert str(exc.value).startswith("Invalid message format:")

            # Force bad command to reach the server
            monkeypatch.setattr(
                "parsec.core.ipcinterface.cmd_req_serializer.dump",
                lambda x: x)
            with pytest.raises(IPCServerBadResponse) as exc:
                await send_to_ipc_server(file1, "dummy")
            assert exc.value.rep == {
                "status": "invalid_format",
                "reason": "{'cmd': ['Unsupported value: dummy']}",
            }
Example #24
async def test_cancel_mount_workspace(base_mountpoint, alice_user_fs,
                                      event_bus):
    """
    This function tests the race conditions between the mounting of a workspace
    and trio cancellation. In particular, it produces interesting results when trying to
    unmount a workspace while it's still initializing.

    The following timeout values are useful for more thorough testing:

        [x * 0.00001 for x in range(2000, 2500)]
    """
    wid = await alice_user_fs.workspace_create(EntryName("w"))

    # Reuse the same mountpoint manager for all the mounts to make sure
    # state is not polluted by previous mount attempts
    async with mountpoint_manager_factory(
        alice_user_fs, event_bus, base_mountpoint) as mountpoint_manager:

        for timeout in count(0, 0.002):
            print(f"timeout: {timeout}")

            async with real_clock_timeout():

                with trio.move_on_after(timeout) as cancel_scope:
                    await mountpoint_manager.mount_workspace(wid)

                if cancel_scope.cancelled_caught:
                    with pytest.raises(MountpointNotMounted):
                        mountpoint_manager.get_path_in_mountpoint(
                            wid, FsPath("/"))
                else:
                    # Sanity check
                    path = trio.Path(
                        mountpoint_manager.get_path_in_mountpoint(
                            wid, FsPath("/")))
                    await path.exists()
                    # Timeout has become too high to be useful, time to stop the test
                    break
Example #25
async def test_init_offline_backend_late_reply(server_factory, core_config,
                                               alice, event_bus):
    can_serve_client = trio.Event()

    async def _handle_client(stream):
        await can_serve_client.wait()
        await stream.aclose()

    async with server_factory(_handle_client) as server:
        alice = server.correct_addr(alice)
        async with real_clock_timeout():
            async with logged_core_factory(config=core_config,
                                           device=alice,
                                           event_bus=event_bus) as core:
                with core.event_bus.listen() as spy:
                    can_serve_client.set()
                    await spy.wait(
                        CoreEvent.BACKEND_CONNECTION_CHANGED,
                        kwargs={
                            "status": BackendConnStatus.LOST,
                            "status_exc": ANY
                        },
                    )
Example #26
async def test_greeter_event_on_claimer_join_and_leave(
        alice, backend, bob_backend_sock, alice_backend_sock,
        backend_invited_sock_factory):
    invitation = await backend.invite.new_for_device(
        organization_id=alice.organization_id,
        greeter_user_id=alice.user_id,
        created_on=datetime(2000, 1, 2),
    )

    await events_subscribe(alice_backend_sock)
    await events_subscribe(bob_backend_sock)

    async with backend_invited_sock_factory(
            backend,
            organization_id=alice.organization_id,
            invitation_type=InvitationType.DEVICE,
            token=invitation.token,
    ):

        # Claimer is ready, this should be notified to greeter

        async with real_clock_timeout():
            rep = await events_listen_wait(alice_backend_sock)
            # PostgreSQL event dispatching might be lagging behind and return
            # the IDLE event first
            if rep.get("invitation_status") == InvitationStatus.IDLE:
                rep = await events_listen_wait(alice_backend_sock)
        assert rep == {
            "status": "ok",
            "event": APIEvent.INVITE_STATUS_CHANGED,
            "token": invitation.token,
            "invitation_status": InvitationStatus.READY,
        }

        # No other authenticated users should be notified
        rep = await events_listen_nowait(bob_backend_sock)
        assert rep == {"status": "no_events"}

        rep = await invite_list(alice_backend_sock)
        assert rep == {
            "status":
            "ok",
            "invitations": [{
                "type": InvitationType.DEVICE,
                "token": invitation.token,
                "created_on": datetime(2000, 1, 2),
                "status": InvitationStatus.READY,
            }],
        }

    # Now claimer has left, greeter should be again notified
    async with real_clock_timeout():
        rep = await events_listen_wait(alice_backend_sock)
    assert rep == {
        "status": "ok",
        "event": APIEvent.INVITE_STATUS_CHANGED,
        "token": invitation.token,
        "invitation_status": InvitationStatus.IDLE,
    }

    rep = await invite_list(alice_backend_sock)
    assert rep == {
        "status":
        "ok",
        "invitations": [{
            "type": InvitationType.DEVICE,
            "token": invitation.token,
            "created_on": datetime(2000, 1, 2),
            "status": InvitationStatus.IDLE,
        }],
    }
Example #27
async def _cli_invoke_in_thread(cmd: str):
    # We must run the command from another thread given it will create its own trio loop
    async with real_clock_timeout():
        # Pass DEBUG environment variable for better output on crash
        return await trio.to_thread.run_sync(
            lambda: runner.invoke(cli, cmd, env={"DEBUG": "1"}))
Example #28
async def test_invite_and_greet_device(aqtbot, logged_gui, running_backend,
                                       autoclose_dialog,
                                       catch_greet_device_widget, bob):
    requested_device_label = DeviceLabel("PC1")

    # First switch to devices page, and click on "new device" button

    d_w = await logged_gui.test_switch_to_devices_widget()

    aqtbot.mouse_click(d_w.button_add_device, QtCore.Qt.LeftButton)

    # Device invitation widget should show up now with welcome page

    gd_w = await catch_greet_device_widget()
    assert isinstance(gd_w, GreetDeviceWidget)

    gdi_w = await catch_greet_device_widget()
    assert isinstance(gdi_w, GreetDeviceInstructionsWidget)

    def _greet_device_displayed():
        assert gd_w.dialog.isVisible()
        assert gd_w.isVisible()
        assert gd_w.dialog.label_title.text() == "Greet a new device"
        assert gdi_w.isVisible()

    await aqtbot.wait_until(_greet_device_displayed)

    # Now we can set up the boilerplate for the test

    start_claimer = trio.Event()
    start_claimer_trust = trio.Event()
    start_claimer_claim_user = trio.Event()

    greeter_sas = None
    greeter_sas_available = trio.Event()
    claimer_sas = None
    claimer_sas_available = trio.Event()
    claimer_done = trio.Event()

    async def _run_claimer():
        nonlocal greeter_sas
        nonlocal claimer_sas

        async with backend_invited_cmds_factory(
                addr=gdi_w.invite_addr) as cmds:
            await start_claimer.wait()

            initial_ctx = await claimer_retrieve_info(cmds)
            in_progress_ctx = await initial_ctx.do_wait_peer()
            greeter_sas = in_progress_ctx.greeter_sas
            greeter_sas_available.set()

            await start_claimer_trust.wait()

            in_progress_ctx = await in_progress_ctx.do_signify_trust()
            claimer_sas = in_progress_ctx.claimer_sas
            claimer_sas_available.set()
            in_progress_ctx = await in_progress_ctx.do_wait_peer_trust()

            await start_claimer_claim_user.wait()

            await in_progress_ctx.do_claim_device(
                requested_device_label=requested_device_label)
            claimer_done.set()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(_run_claimer)

        # Start the greeting

        aqtbot.mouse_click(gdi_w.button_start, QtCore.Qt.LeftButton)

        def _greet_started():
            assert not gdi_w.button_start.isEnabled()
            assert gdi_w.button_start.text() == "Waiting for the new device..."

        await aqtbot.wait_until(_greet_started)

        # Start the claimer, this should change page to code exchange
        start_claimer.set()

        gdce_w = await catch_greet_device_widget()
        assert isinstance(gdce_w, GreetDeviceCodeExchangeWidget)
        await greeter_sas_available.wait()

        def _greeter_code_displayed():
            assert not gdi_w.isVisible()
            assert gdce_w.isVisible()
            # We should be displaying the greeter SAS code
            assert not gdce_w.label_wait_info.isVisible()
            assert gdce_w.widget_greeter_code.isVisible()
            assert not gdce_w.widget_claimer_code.isVisible()
            assert not gdce_w.code_input_widget.isVisible()
            assert gdce_w.line_edit_greeter_code.text() == greeter_sas.str

        await aqtbot.wait_until(_greeter_code_displayed)

        # Pretend the code was correctly transmitted to the claimer
        start_claimer_trust.set()

        def _claimer_code_choices_displayed():
            assert not gdce_w.label_wait_info.isVisible()
            assert not gdce_w.widget_greeter_code.isVisible()
            assert gdce_w.widget_claimer_code.isVisible()
            assert gdce_w.code_input_widget.isVisible()
            assert gdce_w.code_input_widget.code_layout.count() == 4
            # TODO: better check on codes

        await aqtbot.wait_until(_claimer_code_choices_displayed)

        # Pretend we have chosen the right code
        # TODO: click on the button instead of sending the corresponding event
        gdce_w.code_input_widget.good_code_clicked.emit()

        def _wait_claimer_info():
            assert gdce_w.label_wait_info.isVisible()
            assert not gdce_w.widget_greeter_code.isVisible()
            assert not gdce_w.widget_claimer_code.isVisible()

        await aqtbot.wait_until(_wait_claimer_info)

        # Finally the claimer info arrives and we finish the greeting!
        start_claimer_claim_user.set()

        def _greet_done():
            assert not gd_w.isVisible()
            assert autoclose_dialog.dialogs == [
                ("", "The device was successfully created.")
            ]
            # Devices list should be updated
            assert d_w.layout_devices.count() == 2
            # Devices are not sorted in Rust (they come in insertion order)
            device = next(
                (item.widget() for item in d_w.layout_devices.items
                 if item.widget().label_device_name.text() ==
                 requested_device_label.str),
                None,
            )
            assert device.label_is_current.text() == ""

        await aqtbot.wait_until(_greet_done)

        async with real_clock_timeout():
            await claimer_done.wait()
Example #29
async def wait_multiple_with_timeout(self, events, in_order=True):
    async with real_clock_timeout():
        await self.wait_multiple(events, in_order=in_order)
Example #30
async def wait_with_timeout(self, event, kwargs=ANY, dt=ANY, update_event_func=None):
    async with real_clock_timeout():
        await self.wait(event, kwargs, dt, update_event_func)
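These last two wrappers are what the earlier examples call as `spy.wait_with_timeout(...)` and `spy.wait_multiple_with_timeout(...)` (e.g. examples #3 and #21): they simply bound the spy's wait with the same real-clock deadline, so a missing event surfaces as `trio.TooSlowError` instead of hanging the test. A usage sketch lifted from example #21:

# Fail fast if the backend never emits the expected event
with backend.event_bus.listen() as spy:
    rep = await invite_delete(
        alice_backend_sock, token=invitation.token,
        reason=InvitationDeletedReason.CANCELLED,
    )
    assert rep == {"status": "ok"}
    await spy.wait_with_timeout(BackendEvent.INVITE_STATUS_CHANGED)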