Example #1
        async def runner():
            async with taskgroup.TaskGroup():
                async with taskgroup.TaskGroup() as g2:
                    for _ in range(5):
                        g2.create_task(foo())

                    try:
                        await asyncio.sleep(10)
                    except asyncio.CancelledError:
                        raise
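The excerpts in this collection use a pre-standard-library taskgroup module whose interface matches asyncio.TaskGroup from Python 3.11+ (create_task on an async-with context manager), so the isolated sketches added below use the standard-library class. A minimal, runnable sketch of the nested-group shape above, with foo as a placeholder child coroutine; the outer group does not exit until the inner group and all of its tasks have finished:

    import asyncio

    async def foo():
        # Placeholder for the child coroutine spawned in the example.
        await asyncio.sleep(0.1)

    async def runner():
        async with asyncio.TaskGroup():            # outer group
            async with asyncio.TaskGroup() as g2:  # inner group
                for _ in range(5):
                    g2.create_task(foo())
            # Reaching this point means every foo() task has completed.

    asyncio.run(runner())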
Example #2
async def stop(self):
    try:
        async with taskgroup.TaskGroup() as g:
            for srv in self._servers:
                srv.close()
                g.create_task(srv.wait_closed())
            self._servers.clear()
    finally:
        try:
            async with taskgroup.TaskGroup() as g:
                for backend in self._backends:
                    g.create_task(backend.close())
                self._backends.clear()
        finally:
            await super().stop()
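The same shutdown idea in isolation: a task group closes many resources concurrently and still guarantees that stop() returns only after every close has finished, even when some of them fail. A hedged sketch with a hypothetical Resource class standing in for the servers and backends above:

    import asyncio

    class Resource:
        # Hypothetical resource with an async close(), standing in for
        # srv.wait_closed() / backend.close() in the excerpt.
        async def close(self):
            await asyncio.sleep(0.05)

    class Service:
        def __init__(self):
            self._resources = [Resource() for _ in range(3)]

        async def stop(self):
            # Close everything concurrently; the group waits for all
            # of the close() tasks before stop() returns.
            async with asyncio.TaskGroup() as g:
                for res in self._resources:
                    g.create_task(res.close())
            self._resources.clear()

    asyncio.run(Service().stop())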
Example #3
    async def start(self):
        compilers = []
        pgcons = []

        await super().start()

        async with taskgroup.TaskGroup() as g:
            for _ in range(self.concurrency):
                compilers.append(
                    g.create_task(
                        self.new_compiler(self.database, self.get_dbver())))
                pgcons.append(
                    g.create_task(self.get_server().new_pgcon(self.database)))

        for com_task in compilers:
            self._compilers.put_nowait(com_task.result())
            self._compilers_list.append(com_task.result())

        for con_task in pgcons:
            self._pgcons.put_nowait(con_task.result())
            self._pgcons_list.append(con_task.result())

        nethost = await self._fix_localhost(self._nethost, self._netport)
        srv = await self._loop.create_server(self.build_protocol,
                                             host=nethost,
                                             port=self._netport)

        self._servers.append(srv)
Example #4
async def runner():
    nonlocal t1, t2
    async with taskgroup.TaskGroup() as g:
        t1 = g.create_task(foo1())
        t2 = g.create_task(foo2())
        await asyncio.sleep(0.1)
        1 / 0
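The 1 / 0 at the end is deliberate: an exception raised in the body of the async with aborts the group, cancelling the child tasks before the error propagates. A minimal sketch of that behavior; note that the standard-library group wraps the error in an ExceptionGroup, whereas the taskgroup module used in these excerpts raises a MultiError:

    import asyncio

    async def child():
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            print('child cancelled')
            raise

    async def runner():
        async with asyncio.TaskGroup() as g:
            g.create_task(child())
            await asyncio.sleep(0.1)
            1 / 0  # aborts the group: child() is cancelled first

    try:
        asyncio.run(runner())
    except* ZeroDivisionError:
        print('error propagated out of the group')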
Example #5
    async def _run():
        if num_jobs == 1:
            # Special case for --jobs=1
            for _case, dbname, setup_script in setup:
                await _setup_database(dbname, setup_script, conn)
                if verbose:
                    print(f' -> {dbname}: OK', flush=True)
        else:
            async with taskgroup.TaskGroup(name='setup test cases') as g:
                # Use a semaphore to limit the concurrency of bootstrap
                # tasks to the number of jobs (bootstrap is heavy, having
                # more tasks than `--jobs` won't necessarily make
                # things faster.)
                sem = asyncio.BoundedSemaphore(num_jobs)

                async def controller(coro, dbname, *args):
                    async with sem:
                        await coro(dbname, *args)
                        if verbose:
                            print(f' -> {dbname}: OK', flush=True)

                for _case, dbname, setup_script in setup:
                    g.create_task(
                        controller(_setup_database, dbname, setup_script,
                                   conn))
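Stripped of the test harness, the pattern above is: create a task per job, but gate the actual work behind an asyncio.BoundedSemaphore so at most num_jobs run at once (the name= argument accepted by the taskgroup module has no equivalent in asyncio.TaskGroup). A sketch with a placeholder setup_database coroutine:

    import asyncio

    async def setup_database(dbname):
        # Placeholder for the heavy per-database setup work.
        await asyncio.sleep(0.1)

    async def run_setup(dbnames, num_jobs):
        sem = asyncio.BoundedSemaphore(num_jobs)

        async def controller(dbname):
            # The semaphore, not the group, limits concurrency: every
            # task exists up front, but only num_jobs make progress.
            async with sem:
                await setup_database(dbname)
                print(f' -> {dbname}: OK', flush=True)

        async with asyncio.TaskGroup() as g:
            for dbname in dbnames:
                g.create_task(controller(dbname))

    asyncio.run(run_setup([f'db{i}' for i in range(10)], num_jobs=3))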
Example #6
async def runner():
    nonlocal NUM
    async with taskgroup.TaskGroup():
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            NUM += 10
            raise
Example #7
async def nested_runner():
    async with taskgroup.TaskGroup(name='g1') as g1:
        g1.create_task(crash_soon())
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            await asyncio.sleep(0.5)
            raise
Example #8
        async def runner():
            nonlocal NUM, t2

            async with taskgroup.TaskGroup() as g:
                g.create_task(foo1())
                t2 = g.create_task(foo2())

            NUM += 10
Example #9
    async def start(self):
        if self._running is not None:
            raise RuntimeError(
                'the compiler pool has already been started once')

        self._workers_queue = queue.WorkerQueue(self._loop)

        await self._server.start()
        self._running = True

        dbs: state.DatabasesState = immutables.Map()
        for db in self._dbindex.iter_dbs():
            dbs = dbs.set(
                db.name,
                state.DatabaseState(
                    name=db.name,
                    user_schema=db.user_schema,
                    reflection_cache=db.reflection_cache,
                    database_config=db.db_config,
                ))

        init_args = (
            dbs,
            self._backend_runtime_params,
            self._std_schema,
            self._refl_schema,
            self._schema_class_layout,
            self._dbindex.get_global_schema(),
            self._dbindex.get_compilation_system_config(),
        )
        # Pickle once to later send to multiple worker processes.
        init_args_pickled = pickle.dumps(init_args, -1)

        env = _ENV
        if debug.flags.server:
            env = {'EDGEDB_DEBUG_SERVER': '1', **_ENV}
        self._first_proc = await asyncio.create_subprocess_exec(
            *[
                sys.executable,
                '-m',
                WORKER_MOD,
                '--sockname',
                self._poolsock_name,
                '--numproc',
                str(self._pool_size),
            ],
            env=env,
            stdin=subprocess.DEVNULL,
        )

        await asyncio.wait_for(self._server.wait_until_ready(),
                               PROCESS_INITIAL_RESPONSE_TIMEOUT)

        async with taskgroup.TaskGroup(name='compiler-pool-start') as g:
            for pid in self._server.iter_pids():
                g.create_task(
                    self._attach_worker(pid, init_args, init_args_pickled))
Example #10
async def runner():
    nonlocal NUM
    async with taskgroup.TaskGroup():
        try:
            await asyncio.sleep(10)
        except asyncio.CancelledError:
            NUM += 10
            # This isn't a good idea, but we have to support
            # this weird case.
            raise MyExc
Example #11
    async def stop(self):
        try:
            async with taskgroup.TaskGroup() as g:
                for srv in self._servers:
                    srv.close()
                    g.create_task(srv.wait_closed())
                self._servers.clear()
        finally:
            try:
                async with taskgroup.TaskGroup() as g:
                    for cmp in self._compilers_list:
                        g.create_task(cmp.close())
                    self._compilers_list.clear()

                for pgcon in self._pgcons_list:
                    pgcon.terminate()
                self._pgcons_list.clear()
            finally:
                await super().stop()
Example #12
    async def start(self):
        self._sup = await supervisor.Supervisor.create()

        await self._server.start()
        self._running = True

        if self._pool_size:
            async with taskgroup.TaskGroup(name='manager-start') as g:
                for _ in range(self._pool_size):
                    g.create_task(self._spawn_for_pool())
Example #13
        async def runner():
            nonlocal NUM
            async with taskgroup.TaskGroup() as g:
                for _ in range(5):
                    g.create_task(foo())

                try:
                    await asyncio.sleep(10)
                except asyncio.CancelledError:
                    NUM += 10
                    raise
Example #14
    async def stop(self):
        self._serving = False

        async with taskgroup.TaskGroup() as g:
            for port in self._ports:
                g.create_task(port.stop())
            self._ports.clear()
            for port in self._sys_conf_ports.values():
                g.create_task(port.stop())
            self._sys_conf_ports.clear()
            g.create_task(self._mgmt_port.stop())
            self._mgmt_port = None
Example #15
    async def test_taskgroup_23(self):
        async def do_job(delay):
            await asyncio.sleep(delay)

        async with taskgroup.TaskGroup() as g:
            for count in range(10):
                await asyncio.sleep(0.1)
                g.create_task(do_job(0.3))
                if count == 5:
                    self.assertLess(len(g._tasks), 5)
            await asyncio.sleep(1.35)
            self.assertEqual(len(g._tasks), 0)
Example #16
    async def new_backend(self, *, dbname: str, dbver: int):
        server = self.get_server()

        async with taskgroup.TaskGroup() as g:
            new_pgcon_task = g.create_task(server.new_pgcon(dbname))
            compiler_task = g.create_task(self.new_compiler(dbname, dbver))

        backend = Backend(
            new_pgcon_task.result(),
            compiler_task.result())

        self._backends.add(backend)
        return backend
Example #17
    async def new_backend(self, *, dbname: str, dbver: int):
        try:
            async with taskgroup.TaskGroup() as g:
                new_pgcon_task = g.create_task(self.new_pgcon(dbname))
                compiler_task = g.create_task(self.new_compiler(dbname, dbver))
        except taskgroup.MultiError as ex:
            # Errors like "database ??? does not exist" should
            # not be obfuscated by a MultiError.
            raise ex.__errors__[0]

        backend = Backend(new_pgcon_task.result(), compiler_task.result())

        self._backends.add(backend)
        return backend
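Example #17 exists to keep a single underlying failure readable: the taskgroup module wraps child errors in a MultiError and exposes them via __errors__, and the code re-raises only the first one. With the standard-library asyncio.TaskGroup the aggregate type is ExceptionGroup and the children live in .exceptions; a hedged sketch of the equivalent unwrapping:

    import asyncio

    async def fail():
        raise RuntimeError('database "foo" does not exist')

    async def new_backend():
        try:
            async with asyncio.TaskGroup() as g:
                g.create_task(fail())
                g.create_task(asyncio.sleep(0.1))
        except ExceptionGroup as ex:
            # Surface the first child error instead of the wrapper,
            # mirroring `raise ex.__errors__[0]` above.
            raise ex.exceptions[0] from None

    try:
        asyncio.run(new_backend())
    except RuntimeError as err:
        print('got:', err)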
Example #18
        async def runner():
            nonlocal NUM, runner_cancel

            async with taskgroup.TaskGroup() as g:
                g.create_task(foo1())
                g.create_task(foo1())
                g.create_task(foo1())
                g.create_task(foo2())
                try:
                    await asyncio.sleep(10)
                except asyncio.CancelledError:
                    runner_cancel = True
                    raise

            NUM += 10
Example #19
        async def test(delay: float):
            event = asyncio.Event()

            pool = connpool.Pool(
                connect=self.make_fake_connect(),
                disconnect=self.make_fake_disconnect(),
                max_capacity=10,
            )

            async with taskgroup.TaskGroup() as g:
                g.create_task(q0(pool, event))
                await asyncio.sleep(delay)
                g.create_task(q1(pool, event))
                await asyncio.sleep(delay)
                g.create_task(q2(pool, event))
Example #20
    async def _introspect_dbs(self):
        syscon = await self._acquire_sys_pgcon()
        try:
            dbs_query = self.get_sys_query('listdbs')
            json_data = await syscon.parse_execute_json(
                dbs_query, b'__listdbs',
                dbver=0, use_prep_stmt=True, args=(),
            )
            dbnames = json.loads(json_data)
        finally:
            self._release_sys_pgcon()

        async with taskgroup.TaskGroup(name='introspect DBs') as g:
            for dbname in dbnames:
                g.create_task(self.introspect_db(dbname, skip_dropped=True))
Example #21
    async def test_taskgroup_01(self):
        async def foo1():
            await asyncio.sleep(0.1)
            return 42

        async def foo2():
            await asyncio.sleep(0.2)
            return 11

        async with taskgroup.TaskGroup() as g:
            t1 = g.create_task(foo1())
            t2 = g.create_task(foo2())

        self.assertEqual(t1.result(), 42)
        self.assertEqual(t2.result(), 11)
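Example #21 is the canonical usage: tasks created on the group are awaited by the async with block itself, so their results can be read with Task.result() immediately afterwards. The same test, reduced to a self-contained script (Python 3.11+):

    import asyncio

    async def foo1():
        await asyncio.sleep(0.1)
        return 42

    async def foo2():
        await asyncio.sleep(0.2)
        return 11

    async def main():
        async with asyncio.TaskGroup() as g:
            t1 = g.create_task(foo1())
            t2 = g.create_task(foo2())
        # Both tasks are guaranteed to be done here.
        assert t1.result() == 42
        assert t2.result() == 11

    asyncio.run(main())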
Example #22
    async def start(self):
        # Make sure that EdgeQL parser is preloaded; edgecon might use
        # it to restore config values.
        ql_parser.preload()

        async with taskgroup.TaskGroup() as g:
            g.create_task(self._mgmt_port.start())
            for port in self._ports:
                g.create_task(port.start())

        sys_config = self._dbindex.get_sys_config()
        if 'ports' in sys_config:
            for portconf in sys_config['ports']:
                await self._start_portconf(portconf, suppress_errors=True)

        self._serving = True
Example #23
    async def start(self):
        self._stop_evt.clear()
        assert self._task_group is None
        self._task_group = taskgroup.TaskGroup()
        await self._task_group.__aenter__()
        self._accept_new_tasks = True

        await self._create_compiler_pool()

        # Make sure that EdgeQL parser is preloaded; edgecon might use
        # it to restore config values.
        ql_parser.preload()

        if self._startup_script:
            await binary.EdgeConnection.run_script(
                server=self,
                database=self._startup_script.database,
                user=self._startup_script.user,
                script=self._startup_script.text,
            )

        self._servers, actual_port, listen_addrs = await self._start_servers(
            _fix_wildcard_host(self._listen_hosts), self._listen_port)
        if self._listen_port == 0:
            self._listen_port = actual_port

        self._accepting_connections = True
        self._serving = True

        if self._echo_runtime_info:
            ri = {
                "port": self._listen_port,
                "runstate_dir": str(self._runstate_dir),
                "tls_cert_file": self._tls_cert_file,
            }
            print(f'\nEDGEDB_SERVER_DATA:{json.dumps(ri)}\n', flush=True)

        if self._status_sink is not None:
            status = {
                "listen_addrs": listen_addrs,
                "port": self._listen_port,
                "socket_dir": str(self._runstate_dir),
                "main_pid": os.getpid(),
                "tenant_id": self._tenant_id,
                "tls_cert_file": self._tls_cert_file,
            }
            self._status_sink(f'READY={json.dumps(status)}')
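Example #23 keeps a task group open for the whole lifetime of the server by entering it manually in start(); the matching __aexit__ happens on the shutdown path, which is not part of this excerpt. A hedged sketch of that lifecycle with a hypothetical Server class; the sketch enters and exits the group from the same task:

    import asyncio

    class Server:
        def __init__(self):
            self._task_group = None

        async def start(self):
            assert self._task_group is None
            # Enter the group manually so background tasks can be
            # added at any time while the server is running.
            self._task_group = asyncio.TaskGroup()
            await self._task_group.__aenter__()
            self._task_group.create_task(self._background_job())

        async def stop(self):
            # Exiting the group waits for the remaining tasks.
            await self._task_group.__aexit__(None, None, None)
            self._task_group = None

        async def _background_job(self):
            await asyncio.sleep(0.1)

    async def main():
        srv = Server()
        await srv.start()
        await srv.stop()

    asyncio.run(main())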
Example #24
    async def stop(self):
        try:
            self._serving = False

            async with taskgroup.TaskGroup() as g:
                for port in self._ports:
                    g.create_task(port.stop())
                self._ports.clear()
                for port in self._sys_conf_ports.values():
                    g.create_task(port.stop())
                self._sys_conf_ports.clear()
                g.create_task(self._mgmt_port.stop())
                self._mgmt_port = None
        finally:
            pgcon = await self._acquire_sys_pgcon()
            self._sys_pgcon_waiters = None
            self.__sys_pgcon = None
            pgcon.terminate()
Example #25
    async def stop(self):
        if not self._running:
            return

        await self._sup.wait()

        await self._server.stop()
        self._server = None

        workers_to_kill = list(self._workers) + list(self._workers_pool)
        self._workers_pool.clear()
        self._workers.clear()
        self._running = False

        async with taskgroup.TaskGroup(
                name=f'{self._name}-manager-stop') as g:
            for worker in workers_to_kill:
                g.create_task(worker.close())
Example #26
    async def stop(self):
        try:
            srv = self._http_proto_server
            if srv is not None:
                self._http_proto_server = None
                srv.close()
                await srv.wait_closed()
        finally:
            try:
                async with taskgroup.TaskGroup() as g:
                    for cmp in self._compilers_list:
                        g.create_task(cmp.close())
                    self._compilers_list.clear()

                if self._http_request_logger is not None:
                    self._http_request_logger.cancel()
                    await self._http_request_logger
            finally:
                await super().stop()
Example #27
    async def _start_servers(self, hosts, port, admin=True):
        servers = {}
        try:
            async with taskgroup.TaskGroup() as g:
                for host in hosts:
                    servers[host] = g.create_task(
                        self._start_server(host, port))
        except Exception:
            await self._stop_servers([
                fut.result() for fut in servers.values()
                if fut.done() and fut.exception() is None
            ])
            raise
        servers = {host: fut.result() for host, fut in servers.items()}

        addrs = []
        for tcp_srv in servers.values():
            for s in tcp_srv.sockets:
                addrs.append(s.getsockname())

        if len(addrs) > 1:
            if port:
                addr_str = f"{{{', '.join(addr[0] for addr in addrs)}}}:{port}"
            else:
                addr_str = f"{{{', '.join('%s:%d' % addr for addr in addrs)}}}"
        elif addrs:
            addr_str = "%s:%d" % addrs[0]
            port = addrs[0][1]
        else:
            addr_str = None

        if addr_str:
            logger.info('Serving on %s', addr_str)

        if admin and port:
            try:
                admin_unix_srv = await self._start_admin_server(port)
            except Exception:
                await self._stop_servers(servers.values())
                raise
            servers[ADMIN_PLACEHOLDER] = admin_unix_srv

        return servers, port, addrs
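Example #27 shows how to recover partial results when a group fails: each create_task handle is recorded, and on error the listeners that did start are torn down before the exception is re-raised. A condensed, hedged sketch of that recovery logic with hypothetical start_one/stop_servers helpers; the sketch checks cancelled() before exception(), since Task.exception() raises CancelledError on a cancelled task:

    import asyncio

    DELAYS = {'a': 0.01, 'bad': 0.05, 'c': 0.5}

    async def start_one(host):
        await asyncio.sleep(DELAYS[host])
        if host == 'bad':
            raise OSError(f'cannot bind {host}')
        return f'server on {host}'

    async def stop_servers(servers):
        for srv in servers:
            print('stopping', srv)

    async def start_servers(hosts):
        tasks = {}
        try:
            async with asyncio.TaskGroup() as g:
                for host in hosts:
                    tasks[host] = g.create_task(start_one(host))
        except Exception:
            # Tear down the listeners that did come up, then re-raise.
            await stop_servers([
                t.result() for t in tasks.values()
                if t.done() and not t.cancelled() and t.exception() is None
            ])
            raise
        return {host: t.result() for host, t in tasks.items()}

    try:
        asyncio.run(start_servers(['a', 'bad', 'c']))
    except Exception as ex:
        print('startup failed:', ex)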
Example #28
    async def test_taskgroup_03(self):
        async def foo1():
            await asyncio.sleep(1)
            return 42

        async def foo2():
            await asyncio.sleep(0.2)
            return 11

        async with taskgroup.TaskGroup() as g:
            t1 = g.create_task(foo1())
            await asyncio.sleep(0.15)
            # cancel t1 explicitly, i.e. everything should continue
            # working as expected.
            t1.cancel()

            t2 = g.create_task(foo2())

        self.assertTrue(t1.cancelled())
        self.assertEqual(t2.result(), 11)
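Example #28 makes a subtle point: cancelling one child task explicitly does not abort the group; the cancellation stays contained and the remaining tasks run to completion. A minimal runnable version of the same check:

    import asyncio

    async def slow():
        await asyncio.sleep(1)
        return 42

    async def quick():
        await asyncio.sleep(0.2)
        return 11

    async def main():
        async with asyncio.TaskGroup() as g:
            t1 = g.create_task(slow())
            await asyncio.sleep(0.15)
            t1.cancel()  # only t1 is affected; the group keeps going
            t2 = g.create_task(quick())
        assert t1.cancelled()
        assert t2.result() == 11

    asyncio.run(main())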
Example #29
        async def test(delay: float):
            e1 = asyncio.Event()
            e2 = asyncio.Event()
            e3 = asyncio.Event()

            pool = connpool.Pool(
                connect=self.make_fake_connect(),
                disconnect=self.make_fake_disconnect(),
                max_capacity=5,
            )

            async with taskgroup.TaskGroup() as g:
                for _ in range(4):
                    g.create_task(q('A', pool, wait_event=e1))

                await asyncio.sleep(0.1)
                g.create_task(q('B', pool, set_event=e2, wait_event=e3))
                await e2.wait()
                g.create_task(q('B', pool, set_event=e3))

                await asyncio.sleep(0.1)
                e1.set()
Example #30
    async def start(self):
        # Make sure that EdgeQL parser is preloaded; edgecon might use
        # it to restore config values.
        ql_parser.preload()

        async with taskgroup.TaskGroup() as g:
            g.create_task(self._mgmt_port.start())
            for port in self._ports:
                g.create_task(port.start())

        sys_config = self._dbindex.get_sys_config()
        if 'ports' in sys_config:
            for portconf in sys_config['ports']:
                await self._start_portconf(portconf, suppress_errors=True)

        self._serving = True

        if self._echo_runtime_info:
            ri = {
                "port": self._mgmt_port_no,
                "runstate_dir": str(self._runstate_dir),
            }
            print(f'\nEDGEDB_SERVER_DATA:{json.dumps(ri)}\n', flush=True)