Example #1
async def test_merge(assert_run, event_loop):
    with event_loop.assert_cleanup():
        xs = stream.range(1, 5, 2, interval=2) | pipe.delay(1)
        ys = stream.range(0, 5, 2, interval=2) | pipe.merge(xs)
        await assert_run(ys, [0, 1, 2, 3, 4])
        assert event_loop.steps == [1, 1, 1, 1]

    with event_loop.assert_cleanup():
        xs = stream.range(1, 5, 2, interval=2) | pipe.delay(1)
        ys = stream.range(0, 5, 2, interval=2) | pipe.merge(xs)
        await assert_run(ys[:3], [0, 1, 2])
        assert event_loop.steps == [1, 1]

    with event_loop.assert_cleanup():
        xs = stream.just(1) + stream.never()
        ys = xs | pipe.merge(xs) | pipe.timeout(1)
        await assert_run(ys, [1, 1], asyncio.TimeoutError())
        assert event_loop.steps == [1]

    # Reproduce issue #65
    with event_loop.assert_cleanup():
        xs = stream.iterate([1, 2])
        ys = stream.iterate([3, 4])
        zs = stream.merge(xs, ys) | pipe.take(3)
        await assert_run(zs, [1, 2, 3])

    with event_loop.assert_cleanup():
        xs = stream.iterate([1, 2, 3])
        ys = stream.throw(ZeroDivisionError)
        zs = stream.merge(xs, ys) | pipe.delay(1) | pipe.take(3)
        await assert_run(zs, [1, 2, 3])

    # Silencing of a CancelledError

    async def agen1():
        if False:
            yield
        try:
            await asyncio.sleep(2)
        except asyncio.CancelledError:
            return

    async def agen2():
        yield 1

    with event_loop.assert_cleanup():
        xs = stream.merge(agen1(), agen2()) | pipe.delay(1) | pipe.take(1)
        await assert_run(xs, [1])
Example #2
async def main(args):
    app_id = get_app_id(args)
    if app_id is None:
        logger.critical('No APP_ID found')
        return False

    # Loading modules
    logger.info('Loading modules')
    modules = ModuleCache(args.modules)
    assert modules, 'No modules loaded'

    # Loading token
    with closing(appstorage.create('vk-dump', 'the-island.ru')) as db:
        token = await get_or_create_token(args, app_id, db, modules.scope)

    async with TokenSession(token) as session:
        api = API(session)

        rate = TokenLimiter(args.parallel)
        async with aiohttp.ClientSession() as session:
            if not os.path.isdir(args.root):
                logger.info('Creating root dump directory %s', args.root)
                os.makedirs(args.root)

            downloader = Downloader(args.root, session, rate)

            xs = (
                stream.merge(*[m.get_items(api, rate) for m in modules])
                | pipe.map(downloader.download)  # pylint: disable=E1101
            )

            await xs

    logger.info('Done')
Example #3
    async def run(self):
        '''run the engine'''
        # setup future queue
        self._queued_events = deque()

        # await all connections
        await asyncio.gather(*(asyncio.create_task(exch.connect())
                               for exch in self.exchanges))
        await asyncio.gather(*(asyncio.create_task(exch.instruments())
                               for exch in self.exchanges))

        # send start event to all callbacks
        await self.tick(Event(type=EventType.START, target=None))

        async with merge(*(
                exch.tick() for exch in self.exchanges
                if inspect.isasyncgenfunction(exch.tick))).stream() as stream:
            # stream through all events
            async for event in stream:
                # tick exchange event to handlers
                await self.tick(event)

                # TODO move out of critical path
                self._latest = event.target.timestamp if hasattr(
                    event, 'target') and hasattr(event.target,
                                                 'timestamp') else self._latest

                # process any secondary events
                while self._queued_events:
                    event = self._queued_events.popleft()
                    await self.tick(event)

        await self.tick(Event(type=EventType.EXIT, target=None))
Example #4
async def main() -> None:
    new_e3 = NewE3()
    old_e3 = OldE3()

    with open("config.json", "r") as f:
        config = json.loads(f.read())

    username = config.get("studentId", "")
    old_e3_pwd = config.get("oldE3Password", "")
    new_e3_pwd = config.get("newE3Password", "")
    download_path = config.get("downloadPath", "e3")
    gdrive_enable = config.get("gdrive_enable", True)
    download_path = os.path.expanduser(download_path)

    if gdrive_enable:
        store = oauth_file.Storage("token.json")
        creds = store.get()
        if not creds or creds.invalid:
            flow = client.flow_from_clientsecrets("credentials.json", SCOPES)
            creds = tools.run_flow(flow, store)

    while True:
        if username == "":
            username = input("StudentID: ")
        if old_e3_pwd == "":
            old_e3_pwd = getpass("Old E3 Password: ")
        # login check below is an assumption; it was masked ("******") in the source
        if await old_e3.login(username, old_e3_pwd):
            break
        username, old_e3_pwd = "", ""
        print("ID or Old E3 Password Error")

    while True:
        if new_e3_pwd == "":
            new_e3_pwd = getpass("New E3 Password: ")
        # login check is an assumption; it was masked ("******") in the source
        if await new_e3.login(username, new_e3_pwd):
            break
        new_e3_pwd = ""
        print("New E3 Password Error")

    downloader = Downloader(download_path)
    async with stream.merge(new_e3.all_files(), old_e3.all_files()).stream() as files:
        async for file in files:
            downloader.add_file(file)
    modified_files = await downloader.done()

    if gdrive_enable:
        gdrive_client = GDrive(download_path)
        await gdrive_client.upload()

    print("")

    if modified_files:
        print("The below files are added or modified")
        modified_files.sort(key=lambda x: x.course_name)
        for modified_file in modified_files:
            print(f"{modified_file.course_name} - {modified_file.name}")
    else:
        print("No files are added or modified")
Example #5
async def _streamSSEAsync(url, exit=None):
    """internal"""
    from asyncio import Event

    from aiohttp_sse_client import client as sse_client
    from aiostream.stream import merge

    async with sse_client.EventSource(url) as event_source:
        if isinstance(exit, Event):
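            # Wrap the exit Event in a one-shot async generator: Event.wait()
            # returns True, so the merged stream yields True once the event is
            # set and the loop below can return.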

            async def _waitExit():
                yield await exit.wait()

            waits = (_waitExit(), event_source)
        else:
            waits = (event_source,)

        try:
            async with merge(*waits).stream() as stream:
                try:
                    async for event in stream:
                        if event == True:  # noqa: E712
                            return
                        yield json.loads(event.data)
                except ConnectionError:
                    raise PyEXception("Could not connect to SSE Stream")
                except PyEXStopSSE:
                    return
                except BaseException:
                    raise
        except (json.JSONDecodeError, KeyboardInterrupt):
            raise
Example #6
    def _read_sensor(  # pylint: disable=too-many-arguments
        self, source_uuid: UUID, sid: int, unit: str, topic: str,
        callback_config: AdvancedCallbackConfiguration
    ) -> AsyncGenerator[DataEvent, None]:
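        # The monitor stream below polls the device once per second, compares
        # the current callback configuration with the requested one and
        # restores it if it has drifted; the trailing filter(lambda x: False)
        # discards every element, so it only produces side effects, never data.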
        monitor_stream = (
            stream.repeat(self.device, interval=1)
            | pipe.map(
                async_(lambda sensor: sensor.get_callback_configuration(sid)))
            | pipe.map(lambda current_config: None
                       if current_config == callback_config else self.device)
            | pipe.filter(lambda sensor: sensor is not None)
            | pipe.action(lambda sensor: logging.getLogger(__name__).info(
                "Resetting callback config for %s", sensor))
            | pipe.action(
                async_(lambda sensor: sensor.set_callback_configuration(
                    sid, *callback_config)))
            | pipe.filter(lambda x: False))

        return stream.merge(
            stream.just(monitor_stream),
            stream.iterate(self.device.read_events(sids=(sid, )))
            | pipe.map(lambda item: DataEvent(sender=source_uuid,
                                              topic=topic,
                                              value=item.payload,
                                              sid=item.sid,
                                              unit=str(unit))),
        )
Example #7
    async def get_tweets(self):
        queries = [Query(q_str, limit=self.limit, sem=self.sem).get_tweets()
                   for q_str in self.q_strs]
        qs = stream.merge(*queries)
        async with qs.stream() as streamer:
            async for q in streamer:
                yield q
Example #8
async def r_invoice_gen(user: User, *_):
    # create new new pub sub client for streaming locally paid invoices
    local_stream = PUBSUB.add_client(user.username)

    # create stream for remotely paid invoices
    remote_stream = await LND.stub.SubscribeInvoices(ln.InvoiceSubscription())
    global_stream = stream.merge(local_stream, remote_stream)

    async with global_stream.stream() as streamer:
        async for response in streamer:
            try:
                # check if response is from lnd
                # external payment or pubsub - local payment
                if isinstance(response, Invoice):
                    # invoice model received from pubsub client
                    # yield and default resolver will retrieve requested fields
                    yield response
                else:
                    # payment comes from lnd,
                    # check if it's associated with this user
                    invoice = None
                    if response.state == 1:
                        invoice = await Invoice.get(response.r_hash)
                    if invoice and invoice.payee == user.username:
                        # received a paid invoice with this user as payee
                        updated = await invoice.update(
                            paid=True, paid_at=invoice.settle_date
                        ).apply()
                        yield updated

            except GeneratorExit:
                # user closed stream, del pubsub queue
                del local_stream
                if len(PUBSUB[user.username]) == 0:
                    del PUBSUB[user.username]
Example #9
    async def fetch(self, *args, **kwargs):
        gens = [fetch(url, **kwargs) for url in self._urls]
        async with stream.merge(*gens).stream() as chunks:
            async for chunk in chunks:
                if isinstance(chunk, bytes):
                    yield chunk
                elif isinstance(chunk, Exception):
                    self.errors.append(chunk)
Example #10
    async def run(self):
        '''run the engine'''
        # setup future queue
        self._queued_events = deque()
        self._queued_targeted_events = deque()

        # await all connections
        await asyncio.gather(*(asyncio.create_task(exch.connect())
                               for exch in self.exchanges))
        await asyncio.gather(*(asyncio.create_task(exch.instruments())
                               for exch in self.exchanges))

        # send start event to all callbacks
        await self.processEvent(Event(type=EventType.START, target=None))

        # **************** #
        # Main event loop
        # **************** #
        async with merge(
            *(exch.tick() for exch in self.exchanges + [self]
              if inspect.isasyncgenfunction(exch.tick))).stream() as stream:
            # stream through all events
            async for event in stream:
                # tick exchange event to handlers
                await self.processEvent(event)

                # TODO move out of critical path
                if self._offline():
                    # use time of last event
                    self._latest = event.target.timestamp if hasattr(
                        event, 'target') and hasattr(
                            event.target, 'timestamp') else self._latest
                else:
                    # use now
                    self._latest = datetime.now()

                # process any secondary events
                while self._queued_events:
                    event = self._queued_events.popleft()
                    await self.processEvent(event)

                # process any secondary callback-targeted events (e.g. order fills)
                # these need to route to a specific callback,
                # rather than all callbacks
                while self._queued_targeted_events:
                    strat, event = self._queued_targeted_events.popleft()

                    # send to the generating strategy
                    await self.processEvent(event, strat)

                # process any periodics
                await asyncio.gather(
                    *(asyncio.create_task(p.execute(self._latest))
                      for p in self.manager._periodics))

        # Before engine shutdown, send an exit event
        await self.processEvent(Event(type=EventType.EXIT, target=None))
Example #11
    async def fetch(self, *args, **kwargs):
        gens = [fetch(url, **kwargs) for url in self._urls]
        async with stream.merge(*gens).stream() as chunks:
            async for chunk in chunks:
                if isinstance(chunk, (bytes, str)):
                    yield chunk
                elif isinstance(chunk, Exception):
                    self.errors.append(chunk)
                else:
                    logger.warning("Could not yield chunk")
Example #12
async def aiter_opened_prs(seed_prs, session):
    for pr in seed_prs:
        event = await pr_number_as_pull_event(pr, session)
        if event_is_pull_request_opened(event):
            yield event

    async with stream.merge(aiter_nixpkgs_events(session),
                            aiter_server_events(session)).stream() as streamer:
        async for event in streamer:
            if event_is_pull_request_opened(event):
                yield event
Example #13
    async def all_files(self) -> AsyncIterable[E3File]:
        if self._logged_in_session is None:
            raise RuntimeError("Please Login First")
        courses = list(await self._get_course_list(self._logged_in_session))
        gens = [self._get_course_all_files(course) for course in courses[:-1]]
        gens.append(
            self._get_course_all_files(courses[-1], self._logged_in_session))
        async with stream.merge(*gens).stream() as files:
            async for file in files:
                yield file
        self._logged_in_session = None
Example #14
    async def run(self):
        '''run the engine'''
        # await all connections
        await asyncio.gather(*(asyncio.create_task(exch.connect()) for exch in self.exchanges))

        # send start event to all callbacks
        await self.tick(Event(type=EventType.START, target=None))

        async with merge(*(exch.tick() for exch in self.exchanges)).stream() as stream:
            async for event in stream:
                await self.tick(event)
Example #15
    async def receive(self) -> None:
        '''gemini has its own receive method because it uses 1 connection per symbol instead of multiplexing'''
        async def get_data_sub_pair(ws, sub=None):
            async for ret in ws:
                yield ret, sub

        # add one for private stream
        async for val in stream.merge(*[
                get_data_sub_pair(self.ws[i], sub)
                for i, sub in enumerate(self.subscription() + [None])
        ]):
            jsn = json.loads(val[0].data)

            if isinstance(jsn, dict) and 'events' in jsn:
                events = jsn.get('events', [])
            elif not isinstance(jsn, list):
                events = [jsn]
            else:
                events = jsn

            if val[1]:
                # data stream
                pair = json.loads(val[1]).get('product_id')
            else:
                # private events
                pair = None

            for item in events:
                if item.get('type', 'subscription_ack') in ('subscription_ack',
                                                            'heartbeat'):
                    # can skip these
                    continue
                if item.get('type') == 'accepted':
                    # can ignore these as well, will have a fill and/or booked
                    # https://docs.gemini.com/websocket-api/#workflow
                    continue
                if item.get('type') == 'closed':
                    # can ignore these as well, will have a fill or cancelled
                    # https://docs.gemini.com/websocket-api/#workflow
                    continue

                if pair is None:
                    # private events
                    pair = item['symbol']

                item['symbol'] = pair
                res = self.tickToData(item)

                if not self._running:
                    pass

                if res.type != TickType.HEARTBEAT:
                    self.callback(res.type, res)
Example #16
async def websocket_handler(self, request):
    """
    Push new active visitor counts to websocket.

    Accepts '{"type": "setFilter", "filter": {"account": <id>}}' payload to
    filter out account messages sent on connection.
    """

    # Capture account to send events for.
    account_id = None

    # Prepare request for websocket
    ws = aiohttp.web.WebSocketResponse()
    await ws.prepare(request)

    # combine websocket stream, and active visitors channel
    combined = stream.takewhile(stream.merge(active_visitors_channel, ws),
                                lambda _: not ws.closed)
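    # takewhile ends the merged stream as soon as the websocket closes,
    # which terminates the async-for loop below.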
    async with combined.stream() as s:
        async for event in s:
            if isinstance(event, Event):
                av = event.value
                if (not account_id) or (account_id == av.account_id):
                    await ws.send_json({
                        "account_id": av.account_id,
                        "store_id": av.store_id,
                        "ts_from": av.window[0],
                        "ts_to": av.window[1],
                        "count": av.count,
                    })

            elif event.type == aiohttp.WSMsgType.TEXT:
                if event.data == "close":
                    await ws.close()
                else:
                    try:
                        data = json.loads(event.data)
                        if data.get("type") == "setFilter":
                            account_id = data["filter"]["account"]
                            logger.info(
                                f"Sending messages for account {account_id}")

                    except Exception as e:
                        logger.exception(
                            f"Error handling message '{event.data}': {str(e)}")

            elif event.type == aiohttp.WSMsgType.ERROR:
                logger.warning(
                    f"ws connection closed with error: {ws.exception()}")
Example #17
    async def connect(self, *args, **kwargs):  # NOQA
        self.user = self.scope['user']
        if not self.user.is_authenticated:
            await self.close()
            return

        try:
            qs = parse_qs(self.scope['query_string'].decode())
        except Exception:
            await self.close()
            return

        if 'path' not in qs:
            await self.close()
            return

        self.path = pathlib.Path(qs['path'][0])
        self.preview = preview_re.match(str(self.path))
        self.dummy_val = dict()

        if self.preview:
            await self.accept()
            return

        try:
            self.md = await misc.get_markdown_factory().get_markdown(
                self.path, self.user)
        except KeyError:
            await self.accept()
            return

        streams = list()
        for idx, field in enumerate(self.md.display_fields):
            streams.append(my_stream.display(self, idx))

        for name, field in self.md.input_fields.items():
            streams.append(my_stream.input(self, name))
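        # Merge all display and input field streams; the merged stream is
        # stored for the run() task started below.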

        self.stream = stream.merge(*streams)

        # python 3.6 does not have create_task() yet
        # self.run_task = asyncio.create_task(self.run())
        self.run_task = asyncio.ensure_future(self.run())

        await self.accept()
Example #18
async def execute_shell_cmd(cmd: str, context: Context, log_outputs=True):

    async with context.start(action="execute shell command",
                             with_labels=["BASH"]) as ctx:
        await ctx.info(text=cmd)
        p = await asyncio.create_subprocess_shell(
            cmd=cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            shell=True)
        outputs = []
        async with stream.merge(p.stdout,
                                p.stderr).stream() as messages_stream:
            async for message in messages_stream:
                outputs.append(message.decode('utf-8'))
                log_outputs and await context.info(text=outputs[-1])
        await p.communicate()
        return p.returncode, outputs
Example #19
async def main():
    async with Bergen(
            host="p-tnagerl-lab1",
            port=8000,
            client_id="DSNwVKbSmvKuIUln36FmpWNVE2KrbS2oRX0ke8PJ",
            client_secret=
            "Gp3VldiWUmHgKkIxZjL2aEjVmNwnSyIGHWbQJo6bWMDoIUlBqvUyoGWUWAe6jI3KRXDOsD13gkYVCZR0po1BLFO9QT4lktKODHDs0GyyJEzmIjkpEOItfdCC4zIa3Qzu",
            name=
            "frankomanko",  # if we want to specifically only use pods on this innstance we would use that it in the selector
    ):

        sleep = await Node.asyncs.get(package="karl", interface="karl")

        twentysleeps = aios.merge(
            *[sleep.stream({"interval": 1}) for i in range(0, 1)])

        async with twentysleeps.stream() as stream:
            async for item in stream:
                print(item)
Example #20
async def find_congruences_lect11_paralel(g, h, p, B: int, primes: list,
                                          max_equations: int):
    congruences, bases = [], []
    unique = lambda l: list(set(l))
    inv_h = inverse(h, p)
    # while True:
    new_seeds = sample(range(1, 5000), 4)
    #[worker(g, h, p, B, primesB, s) for s in new_seeds]
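    # Run four workers over different random seeds in parallel and merge
    # their streams, collecting congruences until max_equations are gathered.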
    combined_stream = stream.merge(
        worker(g, inv_h, p, B, primes, new_seeds[0]),
        worker(g, inv_h, p, B, primes, new_seeds[1]),
        worker(g, inv_h, p, B, primes, new_seeds[2]),
        worker(g, inv_h, p, B, primes, new_seeds[3]))
    async with combined_stream.stream() as streamer:
        async for item in streamer:
            congruences.append((item[0], item[1]))
            bases = unique([
                base for c in [c[0].keys() for c in congruences] for base in c
            ])
            if len(congruences) >= max_equations: break
    return bases, congruences
Example #21
async def scanner(network, ports=None, timeout=0.5, csv=False):
    """
        main task coroutine which manages all the other functions
        if scanning over the internet, you might want to set the timeout
        to around 1 second, depending on internet speed.
    """
    scan_completed = asyncio.Event()
    scan_completed.clear()  # progress the main loop

    if ports is None:  # default list of common ports
        ports = (
            "9,20-23,25,37,41,42,53,67-70,79-82,88,101,102,107,109-111,"
            "113,115,117-119,123,135,137-139,143,152,153,156,158,161,162,170,179,"
            "194,201,209,213,218,220,259,264,311,318,323,383,366,369,371,384,387,"
            "389,401,411,427,443-445,464,465,500,512,512,513,513-515,517,518,520,"
            "513,524,525,530,531,532,533,540,542,543,544,546,547,548,550,554,556,"
            "560,561,563,587,591,593,604,631,636,639,646,647,648,652,654,665,666,"
            "674,691,692,695,698,699,700,701,702,706,711,712,720,749,750,782,829,"
            "860,873,901,902,911,981,989,990,991,992,993,995,8080,2222,4444,1234,"
            "12345,54321,2020,2121,2525,65535,666,1337,31337,8181,6969")
    ports = parse_ports(ports)

    # initialize task generator
    task_gen = task_generator(network, ports, timeout)

    open_ports = list()
    eprint('scanning . . .')
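    # Spin up MAX_CONCURRENCY workers over the shared task generator and
    # merge their results into a single stream of open ports.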
    workers = [task_worker(task_gen) for _ in range(MAX_CONCURRENCY)]
    merged = merge(*workers)
    async with merged.stream() as streamer:
        async for task in streamer:
            open_ports.append(task)

    eprint('gathering output . . .')
    open_ports.sort(key=ip_sort)
    fancy_print(open_ports, csv=csv)

    eprint('shutting down . . .')
Example #22
    async def run(self):
        '''run the engine'''
        # setup future queue
        self._queued_events = deque()

        # await all connections
        await asyncio.gather(*(asyncio.create_task(exch.connect()) for exch in self.exchanges))
        await asyncio.gather(*(asyncio.create_task(exch.instruments()) for exch in self.exchanges))

        # send start event to all callbacks
        await self.tick(Event(type=EventType.START, target=None))

        async with merge(*(exch.tick() for exch in self.exchanges)).stream() as stream:
            # stream through all events
            async for event in stream:
                # tick exchange event to handlers
                await self.tick(event)

                # process any secondary events
                while self._queued_events:
                    event = self._queued_events.popleft()
                    await self.tick(event)

        await self.tick(Event(type=EventType.EXIT, target=None))
Example #23
    async def receive(self) -> None:
        '''gemini has its own receive method because it uses 1 connection per symbol instead of multiplexing'''
        async def get_data_sub_pair(ws, sub=None):
            async for ret in ws:
                yield ret, sub

        async for val in stream.merge(*[
                get_data_sub_pair(self.ws[i], sub)
                for i, sub in enumerate(self.subscription())
        ]):
            pair = json.loads(val[1]).get('product_id')
            jsn = json.loads(val[0].data)
            if jsn.get('type') == 'heartbeat':
                pass
            else:
                for item in jsn.get('events'):
                    item['symbol'] = pair
                    res = self.tickToData(item)

                    if not self._running:
                        pass

                    if res.type != TickType.HEARTBEAT:
                        self.callback(res.type, res)
Example #24
async def docker(ic, args):
    if len(args) < 1:
        yield None
        return

    if not isinstance(args[0], pathlib.Path):
        yield ('err', "⚠ wrong first argument format ⚠")
        return

    docker_path = pathlib.Path(os.path.normpath(ic.path / args[0]))

    dapi = None
    try:
        dapi = aiodocker.Docker()
        con = await get_container(dapi, docker_path, ic.user)
        if con is None:
            return

        logger.info(f"{con['id'][:12]}: created")

        ain = dict()
        aout = dict()
        for n, arg in enumerate(args[1:]):
            ain[n + 1] = stream_enum(
                n + 1, await my_stream.arg_stream(ic, ic.user, arg))

        ws = await con.websocket(stdin=True,
                                 stdout=True,
                                 stderr=True,
                                 stream=True)
        await con.start()

        logger.debug(f"{con['id'][:12]}: started")

        msgs_n = 0
        msgs_ts = time.time()

        try:
            restart = True
            while restart:
                restart = False

                if ain:
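                    # Merge the prepared input argument streams with the
                    # container's websocket output so one loop consumes both.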
                    s = stream.merge(*list(ain.values()), websocket_reader(ws))
                else:
                    s = stream.merge(websocket_reader(ws))

                    # if input is empty, send empty message to container
                    logger.debug(f"{con['id'][:12]}: < []")
                    await ws.send_str(json.dumps(list()) + "\n")

                out = list()
                async with core.streamcontext(s) as streamer:
                    async for item in streamer:
                        if item[0] == 'ws':
                            logger.debug(
                                f"{con['id'][:12]}: > {item[1][:120]}")

                            msgs_n += 1
                            tdiff = time.time() - msgs_ts

                            if tdiff > 20 and msgs_n > 1000 and msgs_n / tdiff > 20:
                                logger.info(
                                    f"{con['id'][:12]}: receiving too much messages {msgs_n} in {tdiff}s"
                                )
                                yield {
                                    'type':
                                    'error',
                                    'val':
                                    f"receiving too much messages {msgs_n} in {tdiff}s"
                                }
                                break

                            m = wi_native_re.match(item[1])
                            if m:
                                if m.group(1) == 'clear':
                                    out = list()
                                    yield {'type': None, 'val': ""}

                                elif m.group(1).startswith('progress'):
                                    yield {
                                        'type':
                                        'html',
                                        'val':
                                        '<div class="progress"><div class="progress-bar progress-bar-striped progress-bar-animated" role="progressbar" aria-valuenow="50" aria-valuemin="0" aria-valuemax="100" style="width: 50%"></div></div>'
                                    }

                                else:
                                    try:
                                        msg = json.loads(m.group(1))
                                        if msg.get('type') in ['getval']:
                                            mid = msg.get('id')
                                            mval = msg.get('val')
                                            muser = msg.get('user')
                                            if muser is None:
                                                muser = ic.user.pk

                                            if mid is None or mval is None:
                                                logger.warning(
                                                    f"{con['id'][:12]}: getval: broken msg: > {item[1][:120]}"
                                                )
                                                continue

                                            if mid in ain:
                                                continue

                                            try:
                                                u = User.objects.get(pk=muser)
                                            except User.DoesNotExist:
                                                logger.warning(
                                                    f"{con['id'][:12]}: getval: unknown user {muser}"
                                                )
                                                continue

                                            arg = pathlib.Path(mval)
                                            ain[mid] = stream_enum(
                                                mid, await
                                                my_stream.arg_stream(
                                                    ic, u, arg))

                                            restart = True
                                            break
                                        elif msg['type'] in ['error']:
                                            yield msg
                                            break
                                        else:
                                            yield msg
                                    except json.JSONDecodeError as e:
                                        logger.warning(
                                            f"{con['id'][:12]}: broken msg: {e!s}"
                                        )
                                        continue

                            else:
                                out.append(item[1])
                                yield {'type': 'stdout', 'val': "\n".join(out)}

                        elif item[0] == 'err':
                            logger.debug(f"{con['id'][:12]}: !! " +
                                         ' '.join(str(item[1]).split())[:120])
                            yield {'type': 'error', 'val': item[1]}
                            break

                        else:
                            await send_item(ws, con, aout, item)

        except GeneratorExit:
            pass

        finally:
            logger.debug(f"{con['id'][:12]}: delete")

            try:
                await con.kill()
                await con.delete()
                pass
            except Exception:
                pass

            await ws.close()

    except MyException as e:
        yield {'type': 'error', 'val': f"⚠ {e!s} ⚠"}
    except aiodocker.exceptions.DockerError as e:
        yield {'type': 'error', 'val': f"⚠ {e!s} ⚠"}
    except Exception as e:
        logger.exception(e)
        yield {'type': 'error', 'val': f"⚠ {e!s} ⚠"}

    finally:
        if dapi:
            await dapi.close()
Example #25
async def amerge(*agens) -> AsyncGenerator[Any, None]:
    """Thin wrapper around aiostream.stream.merge."""
    xs = stream.merge(*agens)
    async with xs.stream() as streamer:
        async for x in streamer:
            yield x
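
A minimal usage sketch for the wrapper above (the `_letters` and `_numbers` generators are hypothetical, added only for illustration; `amerge` and its `from aiostream import stream` import come from the example itself):

import asyncio

async def _letters():
    for ch in "ab":
        await asyncio.sleep(0.10)  # simulate a slow producer
        yield ch

async def _numbers():
    for n in range(2):
        await asyncio.sleep(0.15)  # a slightly slower producer
        yield n

async def _demo():
    # Items from both generators arrive interleaved, in completion order.
    async for item in amerge(_letters(), _numbers()):
        print(item)

asyncio.run(_demo())  # expected order with these sleeps: 'a', 0, 'b', 1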
Example #26
async def rules_from_kubernetes(ctx) -> AsyncIterable[List[Rule]]:
    """This generator continuously runs, watching Kubernetes for
    certain resources, consuming changes, and determining which
    snapshot rules have been defined.

    Every value it returns is a list of `Rule` objects, a complete
    set of snapshot rules defined at this point in time. Every set
    of rule objects replaces the previous one.
    """

    # These are rules that we are ready to "run".
    rules = {}

    # These are resources that we know we have to recheck, because
    # they will become rules pending a resource creation. For example:
    # A `SnapshotRule` resource points to a volume claim. However, this
    # volume claim is not yet bound. Once Kubernetes creates the volume,
    # it will notify us about the creation of a `PersistentVolume` and the
    # update of a `PersistentVolumeClaim`. It will not, however, send us an
    # update for the `SnapshotRule` - where the rule is actually
    # defined. We thus have to link the rule to the volume.
    pending_rules: Dict[Tuple, pykube.objects.APIObject] = {}

    _logger.debug('volume-events.watch')

    merged_stream = stream.merge(
        watch_resources(ctx, pykube.objects.PersistentVolume, delay=0),
        watch_resources(ctx, pykube.objects.PersistentVolumeClaim, delay=2),
        watch_resources(ctx, SnapshotRule, delay=3, allow_missing=True)
    )

    iterable: AsyncIterable[_WatchEvent] = merged_stream.stream()
    async with iterable as merged_events:
        async for event in merged_events:

            _log = _logger.bind(
                event_type=event.type,
                event_object=event.object.obj,
            )
            _log.info(
                events.VolumeEvent.RECEIVED,
                key_hints=[
                    'event_type',
                    'event_object.metadata.name',
                ],
            )

            # This is how we uniquely identify the rule. This is important
            # such that when an object is deleted, we delete the correct
            # rule.
            key_by = (
                event.object.kind,
                event.object.namespace,
                event.object.name
            )

            events_to_process = [
                (event.type, key_by, event.object)
            ]

            # Is there some other object that was depending on *this*
            # object?
            if key_by in pending_rules:
                depending_object_key, depending_object = pending_rules.pop(key_by)
                if event.type != 'DELETED':
                    events_to_process.append(('MODIFIED', depending_object_key, depending_object))

            for (event_type, rule_key, resource) in events_to_process:

                # TODO: there is probably a bug here, where for rule deletion
                # we should not have to first successfully build the rule; the key
                # is enough to delete it. Same with a modification that causes
                # the rule to break; we should remove it until fixed.
                try:
                    if isinstance(resource, SnapshotRule):
                        rule = await rule_from_snapshotrule(ctx, resource)
                    elif isinstance(resource, pykube.objects.PersistentVolumeClaim):
                        rule = await rule_from_persistent_volume_claim(ctx, resource)
                    elif isinstance(resource, pykube.objects.PersistentVolume):
                        rule = await rule_from_persistent_volume(ctx, resource)
                    else:
                        raise RuntimeError(f'{resource} is not supported.')

                except RuleDependsOn as exc:
                    # We have to remember this so that when we get an
                    # update for the dependency that we lack here, we
                    # can process this resource once more.
                    pending_rules[(
                        exc.data['kind'],
                        exc.data['namespace'],
                        exc.data['name'],
                    )] = (rule_key, resource)
                    continue

                if not rule:
                    continue

                _log = _log.bind(
                    rule=rule
                )

                if event_type == 'ADDED' or event_type == 'MODIFIED':
                    if rule:
                        if event_type == 'ADDED' or rule_key not in rules:
                            _log.info(
                                events.Rule.ADDED,
                                key_hints=['rule.name']
                            )
                        else:
                            _log.info(
                                events.Rule.UPDATED,
                                key_hints=['rule.name']
                            )
                        rules[rule_key] = rule
                    else:
                        if rule_key in rules:
                            _log.info(
                                events.Rule.REMOVED,
                                key_hints=['volume_name']
                            )
                            rules.pop(rule_key)

                elif event_type == 'DELETED':
                    if rule_key in rules:
                        _log.info(
                            events.Rule.REMOVED,
                            key_hints=['volume_name']
                        )
                        rules.pop(rule_key)
                else:
                    _log.warning('Unhandled event')

            # We usually have duplicate disks in `rules`, which is indexed
            # by resource kind. One reason is that we watch both PVCs and
            # PVs, and a PVC/PV pair resolves to the same disk. It is also
            # possible that custom rules the user defined contain duplicates.
            # Let's make sure we only have one rule for every disk. Note
            # that which one we pick is undefined.
            #
            # In the (internal) case of PV/PVC pairs it doesn't matter,
            # since our code is written thus: The rule always references
            # the volume, and we always check the volume, then the claim
            # for deltas. The behaviour for this case is well-defined.
            unique_rules = {rule.disk: rule for rule in rules.values()}.values()
            # TODO: Log in a different place, in a debounced way
            #_logger.info('sync-get-rules.yield', rule_count=len(unique_rules))
            yield list(unique_rules)

        _logger.debug('sync-get-rules.done')
Example #27
async def rules_from_volumes(ctx):
    rules = {}

    _logger.debug('volume-events.watch')

    merged_stream = stream.merge(
        watch_resources(ctx, pykube.objects.PersistentVolume),
        watch_resources(ctx, pykube.objects.PersistentVolumeClaim)
    )

    async with merged_stream.stream() as merged_events:
        async for event in merged_events:
            _log_event = _logger.bind(
                event_type=event.type,
                event_object=event.object.obj,
            )
            _log_event.info(
                events.VolumeEvent.RECEIVED,
                key_hints=[
                    'event_type',
                    'event_object.metadata.name',
                ],
            )
            try:
                volume = await volume_from_resource(ctx, event.object)
            except VolumeNotFound:
                _log_event.exception(
                    events.Volume.NOT_FOUND,
                    key_hints=[
                        'event_type',
                        'event_object.metadata.name',
                    ],
                )
                continue

            volume_name = volume.name
            _log = _logger.new(
                volume_name=volume_name,
                volume_event_type=event.type,
                volume=volume.obj,
            )

            if event.type == 'ADDED' or event.type == 'MODIFIED':
                rule = None
                try:
                    rule = await rule_from_pv(
                        ctx,
                        volume,
                        ctx.config.get('deltas_annotation_key'),
                        use_claim_name=ctx.config.get('use_claim_name'))
                except AnnotationNotFound as exc:
                    _log.info(
                        events.Annotation.NOT_FOUND,
                        key_hints=['volume.metadata.name'],
                        exc_info=exc,
                    )
                except AnnotationError:
                    _log.exception(
                        events.Annotation.ERROR,
                        key_hints=['volume.metadata.name'],
                    )
                except UnsupportedVolume as exc:
                    _log.info(
                        events.Volume.UNSUPPORTED,
                        key_hints=['volume.metadata.name'],
                        exc_info=exc,
                    )

                _log = _log.bind(
                    rule=rule
                )

                if rule:
                    if event.type == 'ADDED' or volume_name not in rules:
                        _log.info(
                            events.Rule.ADDED,
                            key_hints=['rule.name']
                        )
                    else:
                        _log.info(
                            events.Rule.UPDATED,
                            key_hints=['rule.name']
                        )
                    rules[volume_name] = rule
                else:
                    if volume_name in rules:
                        _log.info(
                            events.Rule.REMOVED,
                            key_hints=['volume_name']
                        )
                        rules.pop(volume_name)
            elif event.type == 'DELETED':
                if volume_name in rules:
                    _log.info(
                        events.Rule.REMOVED,
                        key_hints=['volume_name']
                    )
                    rules.pop(volume_name)
            else:
                _log.warning('Unhandled event')

            yield list(rules.values())

        _logger.debug('sync-get-rules.done')
Example #28
async def data_stream_async(url,
                            files,
                            columns=None,
                            map_func=None,
                            reduce_func=None,
                            initializer=None,
                            producer_num=2,
                            data_handler_num=2,
                            executor_type='process'):
    data_type = files[0].split('.')[-1]
    columns_mapping = get_parquet_mapping() if data_type == 'parquet' else None

    if columns:
        c = [k for k, v in columns_mapping.items() if v in columns]
        c = c + list(
            set(columns).difference(set(list(columns_mapping.values()))))
    else:
        c = columns

    global pbar_handler
    pbar_handler = PbarHandler(len(files))

    global executor
    if executor_type == 'process':
        executor = ProcessPoolExecutor(max_workers=data_handler_num)
    elif executor_type == 'thread':
        executor = ThreadPoolExecutor(max_workers=data_handler_num)

    if map_func:
        map_task = partial(data_load, func=map_func)
    else:
        map_task = data_load
    pbar_handler.register(mapper, len(files))

    if reduce_func:
        reduce_task = partial(reducer, func=reduce_func)
    else:
        reduce_task = partial(reducer, func=concat)
        initializer = pd.DataFrame()

    file_streams = [
        stream.preserve(file_stream(files[i::producer_num], url))
        for i in range(producer_num)
    ]
    file_list = []
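    # Merge the producer file streams and feed each file through the map task,
    # capping concurrency at `data_handler_num` in-flight tasks.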

    aws = (stream.merge(*file_streams)
           | pipe.map(
               async_(lambda x: mapper(x, map_task, c, columns_mapping)),
               task_limit=data_handler_num)
           | pipe.map(async_(lambda x: file_list.append(x[0]) or x[1]),
                      task_limit=data_handler_num))

    if reduce_func:
        pbar_handler.register(reducer, len(files) - 1)
        rs = stream.reduce(aws, async_(reduce_task), initializer)
        reduced = await stream.takelast(rs, 1)
        return reduced
    else:
        data_list = await asyncio.gather(stream.list(aws))
        data_list = data_list[0]
        tmp_list = list(zip(file_list, data_list))
        tmp_list = sorted(tmp_list, key=lambda pair: files.index(pair[0]))
        if map_func:
            return tmp_list
        else:
            return pd.concat(list(map(lambda pair: pair[1], tmp_list)), axis=0)
Example #29
    async def run(self):
        """run the engine"""
        # setup future queue
        self._queued_events = deque()
        self._queued_targeted_events = deque()

        # await all connections
        await asyncio.gather(*(asyncio.create_task(exch.connect())
                               for exch in self.exchanges))
        await asyncio.gather(*(asyncio.create_task(exch.instruments())
                               for exch in self.exchanges))

        # send start event to all callbacks
        await self.processEvent(Event(type=EventType.START, target=None))

        # **************** #
        # Main event loop
        # **************** #
        async with merge(
            *(exch.tick() for exch in self.exchanges + [self]
              if inspect.isasyncgenfunction(exch.tick))).stream() as stream:
            # stream through all events
            async for event in stream:
                # TODO move out of critical path
                if self._offline():
                    # inject periodics
                    # TODO optimize
                    # Manager should keep track of the intervals for its periodics,
                    # then we don't need to go through seconds (which is what the
                    # live engine's `tick` method does below). Instead we can just
                    # calculate exactly the intervals
                    if (self._latest != datetime.fromtimestamp(0)
                            and hasattr(event, "target")
                            and hasattr(event.target, "timestamp")):
                        # TODO in progress optimization
                        intervals = self.manager.periodicIntervals()

                        # not the first tick
                        for _ in range(
                                int((event.target.timestamp -
                                     self._latest).total_seconds() /
                                    intervals)):
                            self._latest = self._latest + timedelta(seconds=1 *
                                                                    intervals)
                            if any(
                                    p.expires(self._latest)
                                    for p in self.manager.periodics()):
                                await asyncio.gather(
                                    *(asyncio.create_task(
                                        p.execute(self._latest))
                                      for p in self.manager.periodics()
                                      if p.expires(self._latest)))

                # tick exchange event to handlers
                await self.processEvent(event)

                # TODO move out of critical path
                if self._offline():
                    # use time of last event
                    self._latest = (event.target.timestamp
                                    if hasattr(event, "target")
                                    and hasattr(event.target, "timestamp") else
                                    self._latest)
                else:
                    # use now
                    self._latest = datetime.now()

                # process any secondary events
                while self._queued_events:
                    event = self._queued_events.popleft()
                    await self.processEvent(event)

                # process any secondary callback-targeted events (e.g. order fills)
                # these need to route to a specific callback,
                # rather than all callbacks
                while self._queued_targeted_events:
                    strat, event = self._queued_targeted_events.popleft()

                    # send to the generating strategy
                    await self.processEvent(event, strat)

                # process any periodics
                await asyncio.gather(
                    *(asyncio.create_task(p.execute(self._latest))
                      for p in self.manager.periodics()
                      if p.expires(self._latest)))

        # Before engine shutdown, send an exit event
        await self.processEvent(Event(type=EventType.EXIT, target=None))
Example #30
async def from_streams(base_reader):
    '''
    Asyncio coroutine task to read from all streams &
    detect and then execute the command based on the confirmation tap
    '''
    try:
        # Wait for half a second before processing the events
        await asyncio.sleep(0.5)
        # Grab the touchpad to draw gesture until this coroutine is cancelled
        base_reader.grab()
        # Init all the readers
        x_movement_reader = Reader(base_reader)
        y_movement_reader = Reader(base_reader)
        tap_detector_reader = Reader(base_reader)

        # Reload gesture command map
        reload_config()

        # Store the received coordinates
        coordinates_set = []
        start_time = end_time = 0

        # Zip the X and Y axis events for clarity.
        # It is processed separately, though, when sanitizing the input
        zip_xy = ziplatest(y_movement(x_movement_reader),
                           x_movement(y_movement_reader))
        # Read the tap events as well to indicate
        # the start and end of gesture drawing
        merge_tap_xy = merge(zip_xy,
                             tap_detector(tap_detector_reader))

        async with merge_tap_xy.stream() as merged:
            async for event in merged:
                # The zip_xy events are in the form of tuples
                # while the tap events are evdev event objects
                if not isinstance(event, tuple):
                    if event.value == 1:
                        start_time = event.timestamp()
                    elif event.value == 0:
                        end_time = event.timestamp()
                        # If the draw is too short, ignore and reset
                        # cause it's not meaningful
                        if (end_time - start_time) < 0.3:
                            coordinates_set = []
                            continue

                        detected_gesture = sanitize_and_notify(coordinates_set)
                        # print(f'Detected gesture :- {detected_gesture}')

                        coordinates_set = []
                        if detected_gesture is None:
                            continue
                        # If gesture detected then wait for a confirmation tap
                        tapped, event = await confirmation_tap(base_reader)

                        if tapped:
                            notify(f"Confirmed. Running- {detected_gesture}")
                            execute_command(detected_gesture)
                        else:
                            notify("Clearing gestures")
                else:
                    coordinates_set.append(event)

    except asyncio.CancelledError:
        # Exit all readers from the base_reader once they are done
        x_movement_reader.exit()
        y_movement_reader.exit()
        tap_detector_reader.exit()
        # Ungrab and yield control of touchpad to the user
        base_reader.ungrab()