Example #1
    async def _run(self):
        await self._connect()
        tasks = set()
        reader = asyncio.create_task(self._read_line())
        tasks.add(reader)
        while True:
            done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
            for task in done:
                exception = task.exception()
                if exception is not None:
                    if task is reader:
                        raise exception
                    else:
                        logger.error("Error in event listener", exc_info=exception)

                if task is reader:
                    line = reader.result()
                    reader = asyncio.create_task(self._read_line())
                    tasks.add(reader)
                    event, parameters = parse(line)
                    if event is None:
                        logger.warning("Could not parse %r" % line)
                    else:
                        logger.debug("Got event %s with parameters %r" % (event, parameters))
                        for listener in self._listeners[event]:
                            tasks.add(listener(**parameters))
                        logger.debug("Called %d listeners" % len(self._listeners[event]))
Example #2
 def browse(self, sid=None, cid=None):
     if (sid, cid) in self.browsers:
         self.browsers[sid, cid].lift()
     else:
         browser = self.create_browser(sid, cid)
         self.browsers[sid, cid] = browser
         asyncio.create_task(browser.fill_source())
Example #3
    async def _event_loop(self):
        while True:
            try:
                if "/" in self.host:
                    self._r, w = await asyncio.open_unix_connection(self.host)
                else:
                    self._r, w = await asyncio.open_connection(
                            self.host, self.port)
                break
            except (ConnectionRefusedError, FileNotFoundError):
                await asyncio.sleep(1)

        # mpd's hello, discard it
        await self._read()

        while True:
            cmd, fut, parser = await self.queue.get()
            try:
                w.write(f"{cmd}\n".encode("UTF-8"))
                fut.set_result(await parser())
            except ConnectionError:
                # Queue that command again and reconnect
                self.queue.put_nowait((cmd, fut, parser))
                asyncio.create_task(self._event_loop())
                return
            finally:
                self.queue.task_done()
Example #4
 async def idle(self, state: ConnectionState, cmd: IdleCommand) -> Response:
     response = await self._exec(state.do_command(cmd))
     if not isinstance(response, ResponseOk):
         return response
     await self.write_response(ResponseContinuation(b'Idling.'))
     done = subsystem.get().new_event()
     updates_task = asyncio.create_task(
         self.handle_updates(state, done, cmd))
     done_task = asyncio.create_task(self.read_idle_done(cmd))
     updates_exc: Optional[Exception] = None
     done_exc: Optional[Exception] = None
     try:
         ok = await done_task
     except Exception as exc:
         done_exc = exc
     finally:
         done.set()
     try:
         await updates_task
     except Exception as exc:
         updates_exc = exc
     if updates_exc:
         raise updates_exc
     elif done_exc:
         raise done_exc
     elif not ok:
         return ResponseBad(cmd.tag, b'Expected "DONE".')
     else:
         return response
Example #5
async def relay(
        dreader: asyncio.StreamReader,
        dwriter: asyncio.StreamWriter,
        ureader: asyncio.StreamReader,
        uwriter: asyncio.StreamWriter,
) -> None:
    """Pass data/EOF from dreader to uwriter, and ureader to dwriter.

    Both writers are ensured to be closed upon exiting this function.
    """
    _logger.debug(
        'Relaying %r <=> %r', dwriter.get_extra_info('peername'),
        uwriter.get_extra_info('peername'))
    utask = asyncio.create_task(_relay_data_side(dreader, uwriter))
    dtask = asyncio.create_task(_relay_data_side(ureader, dwriter))
    async with contexts.aclosing_multiple_writers(dwriter, uwriter):
        try:
            await asyncio.gather(utask, dtask)
            _logger.debug(
                'Relay %r <=> %r ended normally',
                dwriter.get_extra_info('peername'),
                uwriter.get_extra_info('peername'))
        except:
            dtask.cancel()
            utask.cancel()
            raise
        finally:
            await asyncio.wait({dtask, utask})
            for t in (dtask, utask):
                if t.exception():
                    _logger.debug(
                        'Relay task %r caught exception %r', t, t.exception())
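A note on the bare `except:` above: since Python 3.8, asyncio.CancelledError derives from BaseException rather than Exception, so a bare `except:` is what ensures that cancelling relay itself still cancels both sub-tasks and closes the writers before the cancellation propagates.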
Example #6
    def _command(self, cmd, parser):
        if self.queue is None:
            self.queue = asyncio.Queue()
            asyncio.create_task(self._event_loop())

        fut = asyncio.Future()
        self.queue.put_nowait((cmd, fut, parser))
        return fut
Example #7
 def handle_player_queue_changed(self, message):
     pid = int(message["pid"])
     if pid in self.queues:
         logger.warning("Filling queue for player %s", pid)
         queue = self.queues[pid]
         asyncio.create_task(queue.fill_source())
     else:
         logger.warning("Could not fill queue for %s", pid)
         logger.warning("Queue keys: %s", self.queues.keys())
Example #8
 async def _disconnect_in(self, delay: int) -> None:
     await asyncio.sleep(delay)
     logger.debug(f"Disconnect timeout of {delay}s elapsed, disconnecting...")
     # Run _disconnect() in a separate task: otherwise this task's own
     # CancelledError would propagate into _disconnect() and prevent it
     # from completing the disconnect.
     #
     # We don't need to check the state because _disconnect_in only runs
     # while the _state is _RUNNING.
     asyncio.create_task(self._disconnect())
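The pitfall described in the comment generalises: a coroutine that awaits its own cleanup can have that cleanup aborted when it is cancelled. A minimal standalone sketch of the detached-cleanup pattern (all names are illustrative, not from this codebase):

import asyncio

async def _cleanup():
    # Runs in its own task, so cancelling the timer task cannot interrupt it.
    await asyncio.sleep(0.1)  # stand-in for real teardown work
    print("cleanup finished")

async def disconnect_in(delay: float) -> None:
    await asyncio.sleep(delay)
    asyncio.create_task(_cleanup())  # detached: survives cancellation of this task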
Example #9
 def playlist(self):
     my_pid = int(self._current_player_info()["pid"])
     playlist = wrap_window(
         self,
         PlaylistBrowser,
         frame_arguments={"pid": my_pid},
         width=500,
         height=100
     )
     self.queues[my_pid] = playlist
     asyncio.create_task(playlist.fill_source())
Example #10
    async def setup(self) -> None:
        """
        The setup stage is completed asynchronously so we can use asynchronous
        facilities to do the setup.

        Note we do not await here, but use create_task() to schedule the
        app to run later so the caller can launch additional tasks.

        :return:
        """

        create_task(self.fetch_and_listen())
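One caveat with this fire-and-forget style: the event loop holds only weak references to tasks, so a task with no other live reference can be garbage-collected before it finishes. A minimal sketch of the usual defence (the _background_tasks name is illustrative):

_background_tasks = set()

def spawn(coro):
    # Keep a strong reference until the task completes.
    task = asyncio.create_task(coro)
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)
    return task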
Example #11
 async def delete_mailbox(self, name: str) -> None:
     redis = self._redis
     name_key = modutf7_encode(name)
     multi = redis.multi_exec()
     multi.hget(self._mbx_key, name_key)
     multi.hdel(self._mbx_key, name_key)
     mbx_guid, _ = await multi.execute()
     if mbx_guid is None:
         raise MailboxNotFound(name)
     await redis.zrem(self._order_key, mbx_guid)
     mbx_prefix = b'%b:%b' % (self._prefix, mbx_guid)
     await redis.set(mbx_prefix + b':abort', 1)
     asyncio.create_task(_delete_keys(redis, [mbx_prefix]))
Example #12
 def _handle_event(self, event, message):
     if event == "/player_state_changed":
         self.handle_state_changed(message)
     elif event == "/player_volume_changed":
         self.handle_volume_changed(message)
     elif event == "/player_playback_error":
         self.handle_playback_error(message)
     elif event == "/player_now_playing_changed":
         asyncio.create_task(self.handle_now_playing_changed(message))
     elif event == "/player_now_playing_progress":
         self.handle_now_playing_progress(message)
     elif event == "/player_queue_changed":
         self.handle_player_queue_changed(message)
     else:
         logger.warning("received unhandled event %s", event)
Example #13
    def stop(self, io_loop):
        """
        Asynchronously stop the application.

        :param tornado.ioloop.IOLoop io_loop: loop to run until all
            callbacks, timeouts, and queued calls are complete

        Call this method to start the application shutdown process.
        The IOLoop will be stopped once the application is completely
        shut down.

        """
        running_async = False
        shutdown = _ShutdownHandler(io_loop)
        for callback in self.on_shutdown_callbacks:
            try:
                maybe_future = callback(self.tornado_application)

                if asyncio.iscoroutine(maybe_future):
                    maybe_future = asyncio.create_task(maybe_future)

                if concurrent.is_future(maybe_future):
                    shutdown.add_future(maybe_future)
                    running_async = True
            except Exception as error:
                self.logger.warning('exception raised from shutdown '
                                    'callback %r, ignored: %s',
                                    callback, error, exc_info=True)

        if not running_async:
            shutdown.on_shutdown_ready()
Example #14
        async def main():
            loop = asyncio.get_running_loop()
            loop.call_exception_handler = call_exc_handler_mock

            nonlocal lo_task
            lo_task = asyncio.create_task(leftover())
            return 123
Example #15
async def amain(*, args, prog):
	params = docopt.docopt(
		__doc__.replace("\t", " " * 4).format(prog=os.path.basename(prog)),
		argv=args,
		help=True,
		version=True,
		options_first=False
	)
	url = params.pop("URL")
	assert not params, params

	log = asyncio.Queue()
	pending_tasks = []
	printer_task = asyncio.create_task(
		printer(queue=log, fo=sys.stdout)
	)

	git = Git(output_queue=log, git_path="git")

	out = await git.credential("fill", {
		"url": url,
	})
	await log.put(out)

	await asyncio.gather(*pending_tasks)
	current_task, all_tasks = (asyncio.current_task(), asyncio.all_tasks())
	assert {current_task, printer_task} == all_tasks, (current_task, all_tasks)

	await log.put(None)
	await printer_task
Example #16
async def main():
    # Create a queue that we will use to store our "workload".
    queue = asyncio.Queue()

    # Generate random timings and put them into the queue.
    total_sleep_time = 0
    for _ in range(20):
        sleep_for = random.uniform(0.05, 1.0)
        total_sleep_time += sleep_for
        queue.put_nowait(sleep_for)

    # Create three worker tasks to process the queue concurrently.
    tasks = []
    for i in range(3):
        task = asyncio.create_task(worker(f'worker-{i}', queue))
        tasks.append(task)

    # Wait until the queue is fully processed.
    started_at = time.monotonic()
    await queue.join()
    total_slept_for = time.monotonic() - started_at

    # Cancel our worker tasks.
    for task in tasks:
        task.cancel()
    # Wait until all worker tasks are cancelled.
    await asyncio.gather(*tasks, return_exceptions=True)

    print('====')
    print(f'3 workers slept in parallel for {total_slept_for:.2f} seconds')
    print(f'total expected sleep time: {total_sleep_time:.2f} seconds')
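The worker coroutine used above is not shown; a sketch consistent with the call sites (each worker repeatedly pulls a delay off the queue, sleeps for it, and marks the item done so that queue.join() can eventually return):

async def worker(name, queue):
    while True:
        sleep_for = await queue.get()
        await asyncio.sleep(sleep_for)
        queue.task_done()
        print(f'{name} has slept for {sleep_for:.2f} seconds')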
Example #17
        async def main():
            loop = asyncio.get_running_loop()
            loop.call_exception_handler = mock.Mock()

            nonlocal lazyboy
            lazyboy = asyncio.create_task(spin())
            raise FancyExit
Example #18
async def download_many(cc_list):
    async with aiohttp.ClientSession() as session:  # <8>
        res = await asyncio.gather(                 # <9>
            *[asyncio.create_task(download_one(session, cc))
                for cc in sorted(cc_list)])

    return len(res)
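download_one is not shown; a minimal sketch matching the call site, assuming cc is a country code and using an invented URL purely for illustration:

async def download_one(session, cc):
    url = f'http://example.com/flags/{cc.lower()}.gif'  # hypothetical URL
    async with session.get(url) as resp:
        await resp.read()
    return cc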
Example #19
    async def stop(self) -> None:
        await self.stopping()

        tasks = []
        for rooms in self._rooms.values():
            for room in rooms:
                tasks.append(asyncio.create_task(self.part(room)))
        for task in tasks:
            await task

        self._stop.set()
Example #20
    async def spawn(self, awaitable: Awaitable[Any], daemon: bool=False) -> asyncio.Task:
        event = asyncio.Event()

        async def task():
            async with (self.daemon if daemon else self):
                event.set()
                await awaitable

        result = asyncio.create_task(task())
        await event.wait()
        return result
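Awaiting event.wait() before returning guarantees the inner task has actually entered the (daemon) context manager and set the event before spawn() hands the task back, so a caller can never cancel it before it has registered itself.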
Example #21
    async def setUp(self):
        # Event used to track if the background task checked if the patch
        # is active
        self.checked = asyncio.Event()

        # This task checks if the object is patched continuously, and sets
        # the checked event everytime it does so.
        self.background_task = asyncio.create_task(
            must_be_patched.crash_if_patched(self.checked))

        # Any test will fail if the background task raises an exception
        self.addCleanup(terminate_and_check_task, self.background_task)
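terminate_and_check_task is not shown; a plausible sketch of the cleanup helper, assuming an async-aware test case that awaits its cleanups (the exact implementation is an assumption):

async def terminate_and_check_task(task):
    task.cancel()
    try:
        await task  # re-raises any exception the background task died with
    except asyncio.CancelledError:
        pass  # clean cancellation is the expected outcome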
Example #22
 async def handle_updates(self, state: ConnectionState, done: Event,
                          cmd: IdleCommand) -> None:
     timeout = self.config.max_idle_wait
     while not done.is_set():
         receive_task = asyncio.create_task(
             self._exec(state.receive_updates(cmd, done)))
         try:
             untagged = await asyncio.wait_for(receive_task, timeout)
         except TimeoutError:
             pass
         else:
             await shield(self.write_updates(untagged))
Example #23
File: base.py Project: icgood/pymap
 async def run(self, *transports):
     failures = []
     transport_tasks = [asyncio.create_task(
         self._run_transport(transport)) for transport in transports]
     for task in transport_tasks:
         try:
             await task
         except Exception as exc:
             failures.append(exc)
     if failures:
         raise failures[0]
     for transport in transports:
         self._check_queue(transport)
Example #24
    def __call__(self, *args, **keywords):
        key = (args, tuple(keywords.items()))

        async def async_update(block=False):
            if self.running[key].locked() and not block:
                return self.cache[key][0]
            async with self.running[key]:
                self.cache[key] = (await self.func(*args, **keywords), time.time())
                return self.cache[key][0]

        def sync_update(block=False):
            if self.running[key].acquire(blocking=block):
                self.cache[key] = (self.func(*args, **keywords), time.time())
            return self.cache[key][0]

        if self._is_async:
            lock_class = asyncio.Lock
            update_func = async_update
        else:
            lock_class = threading.Lock
            update_func = sync_update

        if not self.running.get(key):
            self.running[key] = lock_class()

        if key not in self.cache:
            self._awaitable = update_func(block=True)
            return self._awaitable
        elif self.expires and (time.time() - self.cache[key][1]) > self.expires:
            if self.background:
                if self._is_async:
                    asyncio.create_task(update_func())
                else:
                    threading.Thread(target=update_func).start()
            else:
                self._awaitable = update_func()
                return self._awaitable
        return self.cache[key][0]
Example #25
 async def _connect(self):
     while True:
         try:
             self.device = soco.discovery.by_name(self.zone)
             break
         except (OSError, TypeError):
             await asyncio.sleep(1)
     self.rendering_control = self.device.renderingControl.subscribe(
             auto_renew=True)
     self.executor = ThreadPoolExecutor(max_workers=1)
     self.volume_server = asyncio.create_task(
             asyncio.start_unix_server(
                 self._volume_client_connnected,
                 path=f"/run/user/{os.getuid()}/sonos_volume"))
Example #26
    async def _basetest_huge_content_recvinto(self, address):
        sock = socket.socket()
        sock.setblocking(False)
        DATA_SIZE = 1_000_000

        chunk = b'0123456789' * (DATA_SIZE // 10)

        await self.loop.sock_connect(sock, address)
        await self.loop.sock_sendall(sock,
                                     (b'POST /loop HTTP/1.0\r\n' +
                                      b'Content-Length: %d\r\n' % DATA_SIZE +
                                      b'\r\n'))

        task = asyncio.create_task(self.loop.sock_sendall(sock, chunk))

        array = bytearray(DATA_SIZE)
        buf = memoryview(array)

        nbytes = await self.loop.sock_recv_into(sock, buf)
        data = bytes(buf[:nbytes])
        # The HTTP headers are smaller than the MTU,
        # so they always arrive in the first packet
        self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
        while data.find(b'\r\n\r\n') == -1:
            nbytes = await self.loop.sock_recv_into(sock, buf)
            data = bytes(buf[:nbytes])
        # Strip headers
        headers = data[:data.index(b'\r\n\r\n') + 4]
        data = data[len(headers):]

        size = DATA_SIZE
        checker = cycle(b'0123456789')

        expected = bytes(islice(checker, len(data)))
        self.assertEqual(data, expected)
        size -= len(data)

        while True:
            nbytes = await self.loop.sock_recv_into(sock, buf)
            data = buf[:nbytes]
            if not data:
                break
            expected = bytes(islice(checker, len(data)))
            self.assertEqual(data, expected)
            size -= len(data)
        self.assertEqual(size, 0)

        await task
        sock.close()
Example #27
async def run():
    q = asyncio.Queue()

    async def do(afn, *args, **kwargs):
        await q.put((afn, args, kwargs))
        afn, args, kwargs = await q.get()
        try:
            return await afn(*args, **kwargs, add=q.put)
        finally:
            q.task_done()

    async def task(item, i, *, add=None):
        d, n = random.random(), random.random()
        print(f"{item:02}:{i}	d={d:.5f}	n={n:.5f}")
        await asyncio.sleep(d)

        if add is not None:
            if n > 0.75:
                await add((item, i + 1))

    for item in range(20):
        asyncio.create_task(task(item, 0))

    await q.join()
Example #28
    async def connect(self) -> bool:
        """
        Attempt to create a connection to the Connection's url.

        Returns True if the Connection could connect to the url and is now
        running. Returns False if the Connection could not connect to the url
        and is not running.

        Exceptions:

        This function must be called while the connection is not running,
        otherwise an IncorrectStateException will be thrown. To stop a
        Connection, use disconnect().
        """

        # Special exception message for _CONNECTING.
        if self._state == self._CONNECTING:
            raise IncorrectStateException(("connect() may not be called"
                " multiple times."))

        if self._state != self._NOT_RUNNING:
            raise IncorrectStateException(("disconnect() must complete before"
                " connect() may be called again."))

        logger.debug("Connecting...")

        # Now we're sure we're in the _NOT_RUNNING state, we can set our state.
        # Important: No await-ing has occurred between checking the state and
        # setting it.
        self._state = self._CONNECTING

        success = await self._connect()

        if success:
            logger.debug("Starting event loop")
            self._event_loop = asyncio.create_task(self._run())
            self._state = self._RUNNING
            self._events.fire("connected")
        else:
            self._state = self._NOT_RUNNING

        logger.debug("Sending connected notification")
        async with self._connected_condition:
            self._connected_condition.notify_all()

        logger.debug("Connected" if success else "Connection failed")
        return success
Example #29
    async def setup(self):
        """
        Tasks we run as soon as the server is connected. This includes turning
        on events, listing available music sources, etc.

        In the immediate term I am working on a complete cycle of finding a
        piece of music and playing it, and I am doing that here because it is
        easy; if we want to separate this from the protocol, this function can
        be implemented as a callback.

        :return:
        """
        (self._reader, self._writer) = await open_connection(self._host, HEOS_PORT)
        self._tasks.append(create_task(self.receive_loop()))
        await self.system.register_for_change_events()
        self._players = await _HeosPlayer(self).get_players()
        for player in self._players:
            self.players[player["name"]] = _HeosPlayer(self, player["pid"])
Example #30
    def _handle_updates_sequential(self, tasks: List[asyncio.Task]):
        queue = asyncio.Queue()

        async def run_updates() -> None:
            while True:
                update = await queue.get()
                await self.handle_update(update)
                if isinstance(update, process.ExitUpdate):
                    break

        tasks.append(asyncio.create_task(run_updates()))

        while True:
            update = yield
            queue.put_nowait(update)
            if isinstance(update, process.ExitUpdate):
                break
Example #31
 async def _(event: Event):
     asyncio.create_task(handle_message(self, event))
Example #32
async def run(voc, config):

    logging.getLogger("hbmqtt.client.plugins.packet_logger_plugin").setLevel(
        logging.WARNING
    )

    # FIXME: Allow MQTT credentials in voc.conf

    client_id = "voc_{hostname}_{time}".format(
        hostname=hostname(), time=time()
    )

    mqtt = MQTTClient(client_id=client_id)
    url = config.get("mqtt_url")

    if url:
        _LOGGER.debug("Using MQTT url from voc.conf")
    else:
        _LOGGER.debug("Using MQTT url from mosquitto_pub")
        mqtt_config = read_mqtt_config()
        try:
            username = mqtt_config["username"]
            password = mqtt_config["password"]
            host = mqtt_config["host"]
            port = mqtt_config["port"]
            url = "mqtts://{username}:{password}@{host}:{port}".format(
                username=username, password=password, host=host, port=port
            )
        except Exception as e:
            exit(e)

    entities = {}

    async def mqtt_task():
        try:
            await mqtt.connect(url, cleansession=False, cafile=certifi.where())
            _LOGGER.info("Connected to MQTT server")
        except ConnectException as e:
            exit("Could not connect to MQTT server: %s" % e)
        while True:
            _LOGGER.debug("Waiting for messages")
            try:
                message = await mqtt.deliver_message()
                packet = message.publish_packet
                topic = packet.variable_header.topic_name
                payload = packet.payload.data.decode("ascii")
                _LOGGER.debug("got message on %s: %s", topic, payload)
                Entity.route_message(topic, payload)
            except ClientException as e:
                _LOGGER.error("MQTT Client exception: %s", e)

    asyncio.create_task(mqtt_task())  # pylint:disable=no-member

    interval = int(config["interval"])
    _LOGGER.info("Polling VOC every %d seconds", interval)
    while True:
        available = await voc.update(journal=True)
        wait_list = []
        for vehicle in voc.vehicles:
            if vehicle not in entities:
                _LOGGER.debug("creating vehicle %s", vehicle)

                dashboard = vehicle.dashboard(**config)

                entities[vehicle] = [
                    Entity(mqtt, instrument, config)
                    for instrument in dashboard.instruments
                ]
            for entity in entities[vehicle]:
                _LOGGER.debug(
                    "%s: %s", entity.instrument.full_name, entity.state
                )
                wait_list.append(entity.publish_discovery())
                wait_list.append(entity.publish_availability(available))
                if available:
                    wait_list.append(entity.publish_state())

        await asyncio.gather(*wait_list)
        _LOGGER.debug("Waiting for new VOC update in %d seconds", interval)
        await asyncio.sleep(interval)
Example #33
 def __init__(self, worker_count: int = 1):
     self._queue = asyncio.Queue()
     self._workers = tuple(
         asyncio.create_task(self._consume_tasks())
         for _ in range(worker_count))
Example #34
    async def by_middleware_req(self, req_id: str, req_action: str,
                                req_payload: typing.Any):
        resp_payload = None
        if req_action in map(lambda x: str(x).lower(), [
                "ClearCache",
                "ChangeAvailability",
                "RemoteStartTransaction",
                "RemoteStopTransaction",
                "SetChargingProfile",
                "ChangeConfiguration",
                "UnlockConnector",
                "UpdateFirmware",
                "SendLocalList",
                "CancelReservation",
                "ReserveNow",
                "Reset",
                "DataTransfer",
        ]):
            resp_payload = {"status": "Accepted"}
        elif req_action == "GetConfiguration".lower():
            resp_payload = {
                "configurationKey": [
                    {
                        "key": "type",
                        "value": "device-simulator",
                        "readonly": "true"
                    },
                    {
                        "key": "server_address",
                        "value": self.server_address,
                        "readonly": "true"
                    },
                    {
                        "key": "identifier",
                        "value": self.deviceId,
                        "readonly": "false"
                    },
                ]
            }
        elif req_action == "GetDiagnostics".lower():
            resp_payload = {"fileName": "fake_file_name.log"}

        if req_action == "RemoteStartTransaction".lower():
            if not self.charge_can_start():
                resp_payload["status"] = "Rejected"
            else:
                options = {
                    "connectorId":
                    req_payload["connectorId"]
                    if "connectorId" in req_payload else 0,
                    "idTag":
                    req_payload["idTag"] if "idTag" in req_payload else "-",
                }
                self.logger.info(
                    f"Device, Read, Request, RemoteStart, Options: {json.dumps(options)}"
                )
                asyncio.create_task(
                    utility.run_with_delay(self.flow_charge(False, **options),
                                           2))

        if req_action == "RemoteStopTransaction".lower():
            if not self.charge_can_stop(req_payload["transactionId"] if
                                        "transactionId" in req_payload else 0):
                resp_payload["status"] = "Rejected"
            else:
                asyncio.create_task(
                    utility.run_with_delay(self.flow_charge_stop(), 2))

        if req_action == "Reset".lower():
            asyncio.create_task(utility.run_with_delay(self.re_initialize(),
                                                       2))

        if resp_payload is not None:
            resp = f"""[{MessageTypes.Resp.value},"{req_id}",{json.dumps(resp_payload)}]"""
            await self._ws.send(resp)
            self.logger.debug(f"Device Read, Request, Responded:\n{resp}")
        else:
            self.logger.warning(
                f"Device Read, Request, Unknown or not supported: {req_action}"
            )
Example #35
async def async_setup(opp, config):
    """Set up TTS."""
    tts = SpeechManager(opp)

    try:
        conf = config[DOMAIN][0] if config.get(DOMAIN, []) else {}
        use_cache = conf.get(CONF_CACHE, DEFAULT_CACHE)
        cache_dir = conf.get(CONF_CACHE_DIR, DEFAULT_CACHE_DIR)
        time_memory = conf.get(CONF_TIME_MEMORY, DEFAULT_TIME_MEMORY)
        base_url = conf.get(CONF_BASE_URL)
        opp.data[BASE_URL_KEY] = base_url

        await tts.async_init_cache(use_cache, cache_dir, time_memory, base_url)
    except (OpenPeerPowerError, KeyError):
        _LOGGER.exception("Error on cache init")
        return False

    opp.http.register_view(TextToSpeechView(tts))
    opp.http.register_view(TextToSpeechUrlView(tts))

    # Load service descriptions from tts/services.yaml
    integration = await async_get_integration(opp, DOMAIN)
    services_yaml = integration.file_path / "services.yaml"
    services_dict = cast(
        dict, await opp.async_add_executor_job(load_yaml, str(services_yaml)))

    async def async_setup_platform(p_type, p_config=None, discovery_info=None):
        """Set up a TTS platform."""
        if p_config is None:
            p_config = {}

        platform = await async_prepare_setup_platform(opp, config, DOMAIN,
                                                      p_type)
        if platform is None:
            return

        try:
            if hasattr(platform, "async_get_engine"):
                provider = await platform.async_get_engine(
                    opp, p_config, discovery_info)
            else:
                provider = await opp.async_add_executor_job(
                    platform.get_engine, opp, p_config, discovery_info)

            if provider is None:
                _LOGGER.error("Error setting up platform %s", p_type)
                return

            tts.async_register_engine(p_type, provider, p_config)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error setting up platform: %s", p_type)
            return

        async def async_say_handle(service):
            """Service handle for say."""
            entity_ids = service.data[ATTR_ENTITY_ID]
            message = service.data.get(ATTR_MESSAGE)
            cache = service.data.get(ATTR_CACHE)
            language = service.data.get(ATTR_LANGUAGE)
            options = service.data.get(ATTR_OPTIONS)

            try:
                url = await tts.async_get_url_path(p_type,
                                                   message,
                                                   cache=cache,
                                                   language=language,
                                                   options=options)
            except OpenPeerPowerError as err:
                _LOGGER.error("Error on init TTS: %s", err)
                return

            base = tts.base_url or get_url(opp)
            url = base + url

            data = {
                ATTR_MEDIA_CONTENT_ID: url,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_ENTITY_ID: entity_ids,
            }

            await opp.services.async_call(
                DOMAIN_MP,
                SERVICE_PLAY_MEDIA,
                data,
                blocking=True,
                context=service.context,
            )

        service_name = p_config.get(CONF_SERVICE_NAME,
                                    f"{p_type}_{SERVICE_SAY}")
        opp.services.async_register(DOMAIN,
                                    service_name,
                                    async_say_handle,
                                    schema=SCHEMA_SERVICE_SAY)

        # Register the service description
        service_desc = {
            CONF_NAME: f"Say an TTS message with {p_type}",
            CONF_DESCRIPTION:
            f"Say something using text-to-speech on a media player with {p_type}.",
            CONF_FIELDS: services_dict[SERVICE_SAY][CONF_FIELDS],
        }
        async_set_service_schema(opp, DOMAIN, service_name, service_desc)

    setup_tasks = [
        asyncio.create_task(async_setup_platform(p_type, p_config))
        for p_type, p_config in config_per_platform(config, DOMAIN)
    ]

    if setup_tasks:
        await asyncio.wait(setup_tasks)

    async def async_platform_discovered(platform, info):
        """Handle for discovered platform."""
        await async_setup_platform(platform, discovery_info=info)

    discovery.async_listen_platform(opp, DOMAIN, async_platform_discovered)

    async def async_clear_cache_handle(service):
        """Handle clear cache service call."""
        await tts.async_clear_cache()

    opp.services.async_register(
        DOMAIN,
        SERVICE_CLEAR_CACHE,
        async_clear_cache_handle,
        schema=SCHEMA_SERVICE_CLEAR_CACHE,
    )

    return True
Example #36
 def _start_keep_alive(self) -> None:
     self._stop_keep_alive()
     self._keep_alive_task = asyncio.create_task(self._keep_alive())
Example #37
async def run_restarts_under_load(env: ZenithEnv,
                                  pg: Postgres,
                                  acceptors: List[Safekeeper],
                                  n_workers=10,
                                  n_accounts=100,
                                  init_amount=100000,
                                  max_transfer=100,
                                  period_time=4,
                                  iterations=10):
    # Set the timeout for this test to 5 minutes. It should be enough for the
    # test to complete and is less than CircleCI's no_output_timeout. Note that
    # this timeout is checked only at the beginning of every iteration.
    test_timeout_at = time.monotonic() + 5 * 60

    pg_conn = await pg.connect_async()
    tenant_id = await pg_conn.fetchval("show zenith.zenith_tenant")
    timeline_id = await pg_conn.fetchval("show zenith.zenith_timeline")

    bank = BankClient(pg_conn, n_accounts=n_accounts, init_amount=init_amount)
    # create tables and initial balances
    await bank.initdb()

    stats = WorkerStats(n_workers)
    workers = []
    for worker_id in range(n_workers):
        worker = run_random_worker(stats, pg, worker_id, bank.n_accounts,
                                   max_transfer)
        workers.append(asyncio.create_task(worker))

    for it in range(iterations):
        assert time.monotonic() < test_timeout_at, 'test timed out'

        victim_idx = it % len(acceptors)
        victim = acceptors[victim_idx]
        victim.stop()

        flush_lsn = await pg_conn.fetchval('SELECT pg_current_wal_flush_lsn()')
        flush_lsn = lsn_to_hex(flush_lsn)
        log.info(f'Postgres flush_lsn {flush_lsn}')

        pageserver_lsn = env.pageserver.http_client().timeline_detail(
            uuid.UUID(tenant_id),
            uuid.UUID(timeline_id))["local"]["last_record_lsn"]
        sk_ps_lag = lsn_from_hex(flush_lsn) - lsn_from_hex(pageserver_lsn)
        log.info(
            f'Pageserver last_record_lsn={pageserver_lsn} lag={sk_ps_lag / 1024}kb'
        )

        # Wait until alive safekeepers catch up with postgres
        for idx, safekeeper in enumerate(acceptors):
            if idx != victim_idx:
                await wait_for_lsn(safekeeper, tenant_id, timeline_id,
                                   flush_lsn)

        stats.reset()
        await asyncio.sleep(period_time)
        # assert that at least one transaction has completed in every worker
        stats.check_progress()

        victim.start()

    log.info('Iterations are finished, exiting coroutines...')
    stats.running = False
    # await all workers
    await asyncio.gather(*workers)
    # assert balances sum hasn't changed
    await bank.check_invariant()
    await pg_conn.close()
Example #38
async def get_name_servers():
    print('Getting All')
    actual_task = asyncio.create_task(get_actual_name_servers())
    desired_task = asyncio.create_task(
        get_desired_name_servers(os.environ['HOSTED_ZONE_ID']))
    return await desired_task, await actual_task
Example #39
async def test_update_reports():
    """
    Tests the timely delivery of requested reports
    """
    # Create a server
    logger = logging.getLogger('openleadr')
    logger.setLevel(logging.DEBUG)
    loop = asyncio.get_event_loop()
    server = OpenADRServer(vtn_id='testvtn')

    register_report_future_1 = loop.create_future()
    register_report_future_2 = loop.create_future()
    register_report_futures = [register_report_future_1, register_report_future_2]

    receive_report_future_1 = loop.create_future()
    receive_report_future_2 = loop.create_future()
    receive_report_future_3 = loop.create_future()
    receive_report_future_4 = loop.create_future()
    receive_report_futures = [receive_report_future_1, receive_report_future_2, receive_report_future_3, receive_report_future_4]
    server.add_handler('on_register_report', partial(on_register_report, futures=register_report_futures, receive_futures=receive_report_futures))

    party_future = loop.create_future()
    server.add_handler('on_create_party_registration', partial(on_create_party_registration, future=party_future))

    # Create a client
    client = OpenADRClient(ven_name='myven', vtn_url='http://localhost:8080/OpenADR2/Simple/2.0b')

    # Add 4 reports
    future_1 = loop.create_future()
    client.add_report(callback=partial(collect_data, future=future_1),
                      report_specifier_id='PowerReport',
                      resource_id='Device001',
                      measurement='power_real',
                      sampling_rate=timedelta(seconds=2),
                      unit='W')
    future_2 = loop.create_future()
    client.add_report(callback=partial(collect_data, future=future_2),
                      report_specifier_id='PowerReport',
                      resource_id='Device002',
                      measurement='power_real',
                      sampling_rate=timedelta(seconds=2),
                      unit='W')
    future_3 = loop.create_future()
    client.add_report(callback=partial(collect_data, future=future_3),
                      report_specifier_id='VoltageReport',
                      resource_id='Device001',
                      measurement='voltage',
                      sampling_rate=timedelta(seconds=2),
                      unit='V')
    future_4 = loop.create_future()
    client.add_report(callback=partial(collect_data, future=future_4),
                      report_specifier_id='VoltageReport',
                      resource_id='Device002',
                      measurement='voltage',
                      sampling_rate=timedelta(seconds=2),
                      unit='V')

    assert len(client.reports) == 2
    asyncio.create_task(server.run_async())
    await asyncio.sleep(1)

    # Run the client asynchronously
    print("Running the client")
    asyncio.create_task(client.run())

    print("Awaiting party future")
    await party_future

    print("Awaiting report futures")
    await asyncio.gather(register_report_future_1, register_report_future_2)
    await asyncio.sleep(0.1)
    assert len(server.services['report_service'].report_callbacks) == 4

    print("Awaiting data collection futures")
    await future_1
    await future_2
    await future_3
    await future_4

    print("Awaiting update report futures")
    await asyncio.gather(receive_report_future_1, receive_report_future_2, receive_report_future_3, receive_report_future_4)
    print("Done gathering")

    assert receive_report_future_1.result()[0][1] == future_1.result()
    assert receive_report_future_2.result()[0][1] == future_2.result()
    assert receive_report_future_3.result()[0][1] == future_3.result()
    assert receive_report_future_4.result()[0][1] == future_4.result()

    await client.stop()
    await server.stop()
Example #40
    async def start(self, handlers: List[Callable], devices: dict = None):
        assert self._token, "Login first"
        self._handlers = handlers
        self.devices = devices

        asyncio.create_task(self._connect())
Example #41
 async def start_game(self, channel_name):
     if not self.running_game_task:
         self.running_game_task = asyncio.create_task(self._run_game())
     else:
         await self._send_error(channel_name, "Game is running already")
Example #42
async def inject_recover_scenario_aio(log_dir, config, cluster,
                                      workload_factory, failure_factory):
    cmd_log = path.join(log_dir, config["cmd_log"])
    latency_log = path.join(log_dir, config["latency_log"])
    availability_log = path.join(log_dir, config["availability_log"])

    init_logs(cmd_log, latency_log, availability_log, config["ss_metrics"])
    if not (config["verbose"]):
        gobekli_stdout = logging.getLogger("gobekli-stdout")
        gobekli_stdout.handlers = []

    workload = workload_factory()
    task = asyncio.create_task(workload.start())

    try:
        loop = asyncio.get_running_loop()

        end_time = loop.time() + config["warmup"]
        while workload.is_active:
            if (loop.time() + 1) >= end_time:
                break
            await asyncio.sleep(1)

        # inject
        fault = failure_factory()

        inject_side_thread = ThreadAsyncWaiter(
            lambda: fault.inject(cluster, workload))
        await inject_side_thread.wait(period_ms=500)

        end_time = loop.time() + config["exploitation"]
        while workload.is_active:
            if (loop.time() + 1) >= end_time:
                break
            await asyncio.sleep(1)

        # recover
        await ThreadAsyncWaiter(lambda: fault.recover()).wait(period_ms=500)

        end_time = loop.time() + config["cooldown"]
        while workload.is_active:
            if (loop.time() + 1) >= end_time:
                break
            await asyncio.sleep(1)
    except:
        workload.stop()

        try:
            await task
        except:
            e, v = sys.exc_info()[:2]
            stacktrace = traceback.format_exc()
            chaos_event_log.info(
                m("error on waiting for workflow's tast on handling error",
                  error_type=str(e),
                  error_value=str(v),
                  stacktrace=stacktrace).with_time())

        raise

    workload.stop()
    validation_result = await task
    await workload.dispose()

    scenario = "inject-recover"
    workload = config["workload"]["name"]

    result = ExperimentResult()
    result.is_valid = validation_result.is_valid
    result.error = validation_result.error
    result.title = f"{workload} with {scenario} using {fault.title}"
    result.availability_log = config["availability_log"]
    result.latency_log = config["latency_log"]
    result.analysis = analyze_inject_recover_availability(
        log_dir, config["availability_log"], config["latency_log"])
    return result
Example #43
def main(client, wait_sympol):
    asyncio.create_task(_main(client, wait_sympol))
Example #44
async def main():  # Don't forget the async!
    led_task = asyncio.create_task(blink(board.D1, 0.25, 10))
    await asyncio.gather(led_task)  # Don't forget the await!
    print("done")
Example #45
async def start_background_task(app, coro):
    app['tasks'].append(asyncio.create_task(coro))
Example #46
 async def _start_streaming(self, req_id: int) -> asyncio.Task:
     return asyncio.create_task(self._client.stream_live_ticks(
             req_id, contract=sample_contracts.gbp_usd_fx(),
             listener=self._listener, tick_type=datatype.LiveTicks.BID_ASK
         )
     )
Example #47
 def _schedule_event(self, coro, event_name, *args, **kwargs):
     wrapped = self._run_event(coro, event_name, *args, **kwargs)
     # Schedules the task
     return asyncio.create_task(wrapped, name=f'discord.py: {event_name}')
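Note: the name= argument to asyncio.create_task() used here requires Python 3.8 or later.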
Example #48
 def signal_received():
     server.close_all()
     asyncio.create_task(timelord._shutdown())
Example #49
 def _schedule_renew_registration(self):
     asyncio.create_task(self._renew_registration())
     self._discovery_handle = asyncio.get_running_loop().call_later(
         self._discovery_period, self._schedule_renew_registration)
Example #50
async def create_order_queue(app: Application):
    print('Creating order queue and tasks.')
    queue: Queue = asyncio.PriorityQueue(10)
    app[QUEUE_KEY] = queue
    app[TASKS_KEY] = [asyncio.create_task(process_order_worker(i, queue))
                      for i in range(5)]
Example #51
 def keep_alive_open(self):
     asyncio.create_task(self._keep_alive())
Example #52
 def state_changed(self, *args):
     if self.websocket is None:
         return
     asyncio.create_task(self._state_changed(*args))
Example #53
 def __init__(self, bot: Bot) -> None:
     self.bot = bot
     self.db: AsyncIOMotorCollection = bot.plugin_db.get_partition(self)
     asyncio.create_task(self.migrate())
Example #54
async def async_setup_legacy(hass: HomeAssistant, config: ConfigType) -> None:
    """Set up legacy notify services."""
    hass.data.setdefault(NOTIFY_SERVICES, {})

    async def async_setup_platform(
        integration_name: str,
        p_config: ConfigType | None = None,
        discovery_info: DiscoveryInfoType | None = None,
    ) -> None:
        """Set up a notify platform."""
        if p_config is None:
            p_config = {}

        platform = await async_prepare_setup_platform(hass, config, DOMAIN,
                                                      integration_name)

        if platform is None:
            LOGGER.error("Unknown notification service specified")
            return

        full_name = f"{DOMAIN}.{integration_name}"
        LOGGER.info("Setting up %s", full_name)
        with async_start_setup(hass, [full_name]):
            notify_service = None
            try:
                if hasattr(platform, "async_get_service"):
                    notify_service = await platform.async_get_service(  # type: ignore
                        hass, p_config, discovery_info)
                elif hasattr(platform, "get_service"):
                    notify_service = await hass.async_add_executor_job(
                        platform.get_service,
                        hass,
                        p_config,
                        discovery_info  # type: ignore
                    )
                else:
                    raise HomeAssistantError("Invalid notify platform.")

                if notify_service is None:
                    # Platforms can decide not to create a service based
                    # on discovery data.
                    if discovery_info is None:
                        LOGGER.error(
                            "Failed to initialize notification service %s",
                            integration_name,
                        )
                    return

            except Exception:  # pylint: disable=broad-except
                LOGGER.exception("Error setting up platform %s",
                                 integration_name)
                return

            if discovery_info is None:
                discovery_info = {}

            conf_name = p_config.get(CONF_NAME) or discovery_info.get(
                CONF_NAME)
            target_service_name_prefix = conf_name or integration_name
            service_name = slugify(conf_name or SERVICE_NOTIFY)

            await notify_service.async_setup(hass, service_name,
                                             target_service_name_prefix)
            await notify_service.async_register_services()

            hass.data[NOTIFY_SERVICES].setdefault(integration_name,
                                                  []).append(notify_service)
            hass.config.components.add(f"{DOMAIN}.{integration_name}")

    setup_tasks = [
        asyncio.create_task(async_setup_platform(integration_name, p_config))
        for integration_name, p_config in config_per_platform(config, DOMAIN)
    ]

    if setup_tasks:
        await asyncio.wait(setup_tasks)

    async def async_platform_discovered(
            platform: str, info: DiscoveryInfoType | None) -> None:
        """Handle for discovered platform."""
        await async_setup_platform(platform, discovery_info=info)

    discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
Example #55
async def run_data_acquisition(bmxs, f):
    # orientation quaternions
    q = [np.zeros(shape=(NUM_SAMPLES, 4)) for i in range(len(bmxs))]
    # previously: [quat.Quaternion([0., 0., 0., 0.]) for i in range(120)]

    # position
    pos = [np.zeros(shape=(NUM_SAMPLES, 3)) for i in range(len(bmxs))]

    # velocity
    vel = [np.zeros(shape=(NUM_SAMPLES, 3)) for i in range(len(bmxs))]

    # Setup initial orientation, velocity and position

    for qi in q:
        qi[NUM_SAMPLES - 1] = np.array(
            [1., 0., 0., 0.])  # quat.Quaternion(np.array([1.,0.,0.,0.]))

    # Wait for IMU  to init

    for bmx in bmxs:
        while not bmx.begin():
            print("Waiting for IMU...")
            time_lib.sleep(1)

    last_time = [0] * len(bmxs)

    current_inc = 0

    while 1:
        # Create tasks

        num_tasks = 0
        tasks = [None] * len(bmxs)

        for bmx in bmxs:
            tasks[num_tasks] = asyncio.create_task(
                data_loop(bmx, q[num_tasks], pos[num_tasks], vel[num_tasks],
                          last_time[num_tasks]))

            num_tasks += 1

        for i in range(len(tasks)):
            (q[i], last_time[i]) = await tasks[i]

        if DEBUG_MODE == 1:
            print("q")
            print(q)
            # print("pos")
            # print(pos)
            print(samp_freq)

        current_inc += 1

        if (current_inc % 10 == 0):
            f.seek(0)
            for i in range(len(q)):
                f.write("{},{},{},{}\n".format(q[i][NUM_SAMPLES - 1][0],
                                               q[i][NUM_SAMPLES - 1][1],
                                               q[i][NUM_SAMPLES - 1][2],
                                               q[i][NUM_SAMPLES - 1][3]))
                # await print_output(q[i], pos[i], vel[i], i)
            f.flush()
Example #56
def setup(bot):
    cog = RoomTools(bot)
    bot.add_cog(cog)
    cog.init()
    asyncio.create_task(maybe_notify(bot))
Example #57
    async def _connect(self, fails: int = 0):
        """Permanent connection loop to Cloud Servers."""
        resp = await self._api('post', 'dispatch/app', {'accept': 'ws'})
        if resp:
            try:
                url = f"wss://{resp['IP']}:{resp['port']}/api/ws"
                self._ws = await self.session.ws_connect(url,
                                                         heartbeat=55,
                                                         ssl=False)

                ts = time.time()
                payload = {
                    'action': 'userOnline',
                    'at': self._token,
                    'apikey': self._apikey,
                    'userAgent': 'app',
                    'appid': 'oeVkj2lYFGnJu5XUtWisfW4utiN4u9Mq',
                    'nonce': str(int(ts / 100)),
                    'ts': int(ts),
                    'version': 8,
                    'sequence': str(int(ts * 1000))
                }
                await self._ws.send_json(payload)

                msg: WSMessage = await self._ws.receive()
                _LOGGER.debug(f"Cloud init: {json.loads(msg.data)}")

                async for msg in self._ws:
                    fails = 0

                    if msg.type == WSMsgType.TEXT:
                        resp = json.loads(msg.data)
                        await self._process_ws_msg(resp)

                    elif msg.type == WSMsgType.CLOSED:
                        _LOGGER.debug(f"Cloud WS Closed: {msg.data}")
                        break

                    elif msg.type == WSMsgType.ERROR:
                        _LOGGER.debug(f"Cloud WS Error: {msg.data}")
                        break

                # can't run two WS on same account in same time
                if time.time() - ts < 10 and fails < FAST_DELAY:
                    _LOGGER.error(CLOUD_ERROR)
                    fails = FAST_DELAY

            except ClientConnectorError as e:
                _LOGGER.error(f"Cloud WS Connection error: {e}")

            except (asyncio.CancelledError, RuntimeError) as e:
                if isinstance(e, RuntimeError):
                    assert e.args[0] == 'Session is closed', e.args

                _LOGGER.debug(f"Cancel WS Connection: {e}")
                if not self._ws.closed:
                    await self._ws.close()
                return

            except Exception:
                _LOGGER.exception(f"Cloud WS exception")

        delay = RETRY_DELAYS[fails]
        _LOGGER.debug(f"Cloud WS retrying in {delay} seconds")
        await asyncio.sleep(delay)

        if fails + 1 < len(RETRY_DELAYS):
            fails += 1

        asyncio.create_task(self._connect(fails))
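Scheduling self._connect(fails) as a fresh task, instead of awaiting it recursively, lets the current invocation return, so the reconnect loop never grows the call stack no matter how many retries occur.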
Example #58
async def main(argv):
    global wg
    # set the directory for the script
    os.chdir(os.path.dirname(sys.argv[0]))

    ## Read config
    BLITZAPP_FOLDER = '.'
    try:
        if os.path.isfile(FILE_CONFIG):
            config = configparser.ConfigParser()
            config.read(FILE_CONFIG)

            configOptions 	= config['EXTRACT_TANKOPEDIA']
            BLITZAPP_FOLDER = configOptions.get('blitz_app_dir', BLITZAPP_FOLDER)
    except:
        pass
    
    parser = argparse.ArgumentParser(description='Extract Tankopedia data from Blitz game files')
    parser.add_argument('blitz_app_base', type=str, nargs='?', metavar="BLITZAPP_FOLDER", default=BLITZAPP_FOLDER, help='Base dir of the Blitz App files')
    parser.add_argument('tanks', type=str, default='tanks.json', nargs='?', metavar="TANKS_FILE", help='File to write Tankopedia')
    parser.add_argument('maps', type=str, default='maps.json', nargs='?', metavar='MAPS_FILE', help='File to write map names')
    arggroup = parser.add_mutually_exclusive_group()
    arggroup.add_argument('-d', '--debug', action='store_true', default=False, help='Debug mode')
    arggroup.add_argument('-v', '--verbose', action='store_true', default=False, help='Verbose mode')
    arggroup.add_argument('-s', '--silent', action='store_true', default=False, help='Silent mode')
        
    args = parser.parse_args()
    bu.set_log_level(args.silent, args.verbose, args.debug)

    wg = WG()
    
    tasks = []
    for nation in wg.NATION:
        tasks.append(asyncio.create_task(extract_tanks(args.blitz_app_base, nation)))

    tanklist = []
    for tanklist_tmp in await asyncio.gather(*tasks):
        tanklist.extend(tanklist_tmp)
    
    tank_strs, map_strs = await read_user_strs(args.blitz_app_base)

    json_data = None
    userStrs = {}
    tanks = {}
    if os.path.exists(args.tanks):
        try:
            async with aiofiles.open(args.tanks) as infile:
                json_data = json.loads(await infile.read())
                userStrs = json_data['userStr']
                tanks = json_data['data']
        except Exception as err:
            bu.error('Unexpected error when reading file: ' + args.tanks + ' : ' + str(err))

    async with aiofiles.open(args.tanks, 'w', encoding="utf8") as outfile:
        new_tanks, new_userStrs = await convert_tank_names(tanklist, tank_strs)
        # merge old and new tankopedia
        tanks.update(new_tanks)
        userStrs.update(new_userStrs) 
        tankopedia = collections.OrderedDict()
        tankopedia['status'] = 'ok'
        tankopedia['meta'] = { "count":  len(tanks) }
        tankopedia['data'] = bu.sort_dict(tanks, number=True)
        tankopedia['userStr'] = bu.sort_dict(userStrs)
        bu.verbose_std('New tankopedia \'' + args.tanks + '\' contains ' + str(len(tanks)) + ' tanks')
        bu.verbose_std('New tankopedia \'' + args.tanks + '\' contains ' + str(len(userStrs)) + ' tank strings')        
        await outfile.write(json.dumps(tankopedia, ensure_ascii=False, indent=4, sort_keys=False))
    
    if args.maps is not None:
        maps = {}
        if os.path.exists(args.maps):
            try:
                async with aiofiles.open(args.maps) as infile:
                    maps = json.loads(await infile.read())
            except Exception as err:
                bu.error('Unexpected error when reading file: ' + args.maps + ' : ' + str(err))
        # merge old and new map data
        maps.update(map_strs)
        async with aiofiles.open(args.maps, 'w', encoding="utf8") as outfile:
            bu.verbose_std('New maps file \'' + args.maps + '\' contains ' + str(len(maps)) + ' maps')
            await outfile.write(json.dumps(maps, ensure_ascii=False, indent=4, sort_keys=True))

    return None
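main() above is a complete coroutine; a conventional entry point for it might look like this (a sketch, assuming the module is executed directly as a script):

if __name__ == "__main__":
    import sys
    import asyncio
    # Run the async main() on a fresh event loop and exit when it returns.
    asyncio.run(main(sys.argv))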
Example No. 59
0
    async def start(self):
        """
        Start the client and run the initial set-up.

        At the moment, user initialization is handled here, whereas draft
        initialization is handled in init_listener. It will probably make sense
        to start everything in one place or the other; it depends on whether the
        bot needs to know when initialization has finished.
        """
        logger.info("FantasyHandler Starting")
        # NOTE: the returned task is not stored; keeping a reference would protect it from garbage collection
        asyncio.create_task(self.client.run())
        user_resp = await self.client.send_sub_users(SubUser(toggle=True))
        # WARNING: a bit of a mess, but user ids are always UUIDs whereas draft ids are always plain strings
        self.users = {
            str(u["external_user_id"]): ExternalUser(**u)
            for u in user_resp["data"]["users"]
        }
        logger.info(f"FantasyHandler received {len(self.users)} users")
        logger.debug(f'FantasyHandler users received: {pformat(self.users)}')
        self.discord_user_id_to_fantasy_id = {
            u.meta["discord_id"]: u.external_user_id
            for u in self.users.values()
        }
        league_resp = (await self.client.send_sub_leagues(SubLeague(all=True)))["data"]
        if league_resp:
            # Reuse the response already fetched instead of issuing a second, identical request
            self.league = league_resp[0]
            self.user_id_to_team = {
                str(t["external_user_id"]): FantasyTeam(**t)
                for t in self.league["fantasy_teams"]
            }
            self.team_id_to_user_id = {
                t["fantasy_team_id"]: t["external_user_id"]
                for t in self.league["fantasy_teams"]
            }
        else:
            self.user_id_to_team = {}
            self.league = None

        drafts_resp = await self.client.send_sub_drafts(SubDraft(all=True))
        self.drafts = {
            draft["draft_id"]: draft
            for draft in drafts_resp["data"]
        }
        self.team_id_to_draft_id = {
            str(team["fantasy_team_id"]): d["draft_id"]
            for d in self.drafts.values() for team in d["team_drafts"]
        }
        for draft in self.drafts.values():
            self.draft_ids_to_channel_ids[
                draft["draft_id"]] = draft["meta"].get("channel_id")
            self.channel_ids_to_draft_ids[draft["meta"].get(
                "channel_id")] = draft["draft_id"]

            self.draft_players_picked[draft["draft_id"]] = {
                pick["player_id"]
                for td in draft["team_drafts"] for pick in td["active_picks"]
            }

        self.draft_choices = {
            d["draft_id"]: self.sorted_draft_choices(d)
            for d in self.drafts.values()
        }
        logger.info("FantasyHandler Loaded")
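As the docstring notes, start() both schedules the client's run loop and performs the initial subscriptions. A hedged sketch of how a caller might drive it (FantasyHandler's constructor arguments are unknown here, so they are omitted):

import asyncio

async def run_bot():
    handler = FantasyHandler()  # constructor arguments omitted; hypothetical
    # start() schedules client.run() itself, so only the initial set-up is awaited here.
    await handler.start()

asyncio.run(run_bot())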
Example No. 60
0
    async def process_files_from_master(self, name: str, file_received: asyncio.Event):
        """Perform relevant actions for each file according to its status.

        Process the integrity files coming from the master: update the local files as required and send
        the master any requested extra_valid files.

        Parameters
        ----------
        name : str
            Task ID that was waiting for the file to be received.
        file_received : asyncio.Event
            Asyncio event that is unlocked once the file has been received.
        """
        logger = self.task_loggers['Integrity sync']

        try:
            await asyncio.wait_for(file_received.wait(),
                                   timeout=self.cluster_items['intervals']['communication']['timeout_receiving_file'])
        except Exception:
            timeout_exc = WazuhClusterError(3039, extra_message=f'Integrity sync at {self.name}')
            await self.send_request(
                command=b'syn_i_w_m_r',
                data=b'None ' + json.dumps(timeout_exc, cls=c_common.WazuhJSONEncoder).encode())
            raise timeout_exc

        if isinstance(self.sync_tasks[name].filename, Exception):
            exc_info = json.dumps(exception.WazuhClusterError(
                1000, extra_message=str(self.sync_tasks[name].filename)), cls=c_common.WazuhJSONEncoder)
            await self.send_request(command=b'syn_i_w_m_r', data=b'None ' + exc_info.encode())
            raise self.sync_tasks[name].filename

        zip_path = ""
        # Path of the zip containing a JSON with metadata and files to be updated in this worker node.
        received_filename = self.sync_tasks[name].filename

        try:
            self.integrity_sync_status['date_start'] = datetime.utcnow().timestamp()
            logger.info("Starting.")

            """
            - zip_path contains the path of the unzipped directory
            - ko_files contains a Dict with this structure:
              {'missing': {'<file_path>': {<MD5, merged, merged_name, etc>}, ...},
               'shared': {...}, 'extra': {...}, 'extra_valid': {...}}
            """
            ko_files, zip_path = await cluster.run_in_pool(self.loop, self.manager.task_pool, cluster.decompress_files,
                                                           received_filename)
            logger.info("Files to create: {} | Files to update: {} | Files to delete: {} | Files to send: {}".format(
                len(ko_files['missing']), len(ko_files['shared']), len(ko_files['extra']), len(ko_files['extra_valid']))
            )

            if ko_files['shared'] or ko_files['missing'] or ko_files['extra']:
                # Update or remove files in this worker node according to their status (missing, extra or shared).
                logger.debug("Worker does not meet integrity checks. Actions required.")
                logger.debug("Updating local files: Start.")
                await cluster.run_in_pool(self.loop, self.manager.task_pool, self.update_master_files_in_worker,
                                          ko_files, zip_path, self.cluster_items, self.task_loggers['Integrity sync'])
                logger.debug("Updating local files: End.")

            # Send extra valid files to the master.
            if ko_files['extra_valid']:
                logger.debug("Master requires some worker files.")
                asyncio.create_task(self.sync_extra_valid(ko_files['extra_valid']))
            else:
                logger.info(
                    f"Finished in {datetime.utcnow().timestamp() - self.integrity_sync_status['date_start']:.3f}s.")

        except exception.WazuhException as e:
            logger.error(f"Error synchronizing extra valid files: {e}")
            await self.send_request(command=b'syn_i_w_m_r',
                                    data=b'None ' + json.dumps(e, cls=c_common.WazuhJSONEncoder).encode())
        except Exception as e:
            logger.error(f"Error synchronizing extra valid files: {e}")
            exc_info = json.dumps(exception.WazuhClusterError(1000, extra_message=str(e)),
                                  cls=c_common.WazuhJSONEncoder)
            await self.send_request(command=b'syn_i_w_m_r', data=b'None ' + exc_info.encode())
        finally:
            # Clean up the unzipped directory, if one was created
            if zip_path:
                shutil.rmtree(zip_path)
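
The timeout guard at the top of process_files_from_master is a reusable pattern: wait on an asyncio.Event with a deadline and report the failure before re-raising. A stripped-down sketch of just that pattern (the function name and timeout value are illustrative):

import asyncio

async def wait_for_file(file_received: asyncio.Event, timeout: float = 60.0) -> None:
    try:
        # wait_for() cancels the inner wait and raises TimeoutError once the deadline passes.
        await asyncio.wait_for(file_received.wait(), timeout=timeout)
    except asyncio.TimeoutError:
        # A real handler would notify the peer here before propagating the error.
        raise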