class TaskPool(object):
    def __init__(self, loop, num_workers):
        self.loop = loop
        self.tasks = Queue(loop=self.loop)
        self.workers = []
        for _ in range(num_workers):
            worker = asyncio.ensure_future(self.worker(), loop=self.loop)
            self.workers.append(worker)

    async def worker(self):
        while True:
            future, task = await self.tasks.get()
            if task is TERMINATOR:
                break
            result = await asyncio.wait_for(task, None, loop=self.loop)
            future.set_result(result)

    def submit(self, task):
        future = asyncio.Future(loop=self.loop)
        self.tasks.put_nowait((future, task))
        return future

    async def join(self):
        for _ in self.workers:
            self.tasks.put_nowait((None, TERMINATOR))
        await asyncio.gather(*self.workers, loop=self.loop)
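
A minimal usage sketch for the pool above (the fetch coroutine and URLs are illustrative, and it assumes a Python version where Queue and gather still accept the loop argument, as the example itself does):

import asyncio

async def fetch(url):
    await asyncio.sleep(0.1)  # stand-in for real I/O
    return url

async def demo(loop):
    pool = TaskPool(loop, num_workers=3)
    futures = [pool.submit(fetch("https://example.com/%d" % i)) for i in range(10)]
    results = await asyncio.gather(*futures)
    await pool.join()  # sends TERMINATOR to every worker and waits for them
    print(results)

loop = asyncio.get_event_loop()
loop.run_until_complete(demo(loop))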
Example #2
 def start(self, loop: asyncio.AbstractEventLoop):
     if hasattr(self, 'started') and self.started:
         # prevent a backend callback from starting more than 1 writer and creating more than 1 queue
         return
     self.queue = Queue()
     loop.create_task(self.writer())
     self.started = True
Example #3
class BackendQueue:
    def start(self, loop: asyncio.AbstractEventLoop):
        if hasattr(self, 'started') and self.started:
            # prevent a backend callback from starting more than 1 writer and creating more than 1 queue
            return
        self.queue = Queue()
        loop.create_task(self.writer())
        self.started = True

    async def writer(self):
        raise NotImplementedError

    @asynccontextmanager
    async def read_queue(self):
        update = await self.queue.get()
        yield update
        self.queue.task_done()

    @asynccontextmanager
    async def read_many_queue(self, count: int):
        ret = []
        counter = 0
        while counter < count:
            update = await self.queue.get()
            ret.append(update)
            counter += 1

        yield ret

        for _ in range(count):
            self.queue.task_done()
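
A minimal sketch of how this BackendQueue might be used, reusing the imports the example already assumes (asyncio.Queue and contextlib.asynccontextmanager); the TradeBackend subclass and its payloads are illustrative:

import asyncio

class TradeBackend(BackendQueue):
    async def writer(self):
        # Push a few fake updates onto the queue created by start()
        for i in range(3):
            await self.queue.put({"trade_id": i})

async def demo():
    backend = TradeBackend()
    backend.start(asyncio.get_event_loop())
    for _ in range(3):
        async with backend.read_queue() as update:
            print(update)

asyncio.get_event_loop().run_until_complete(demo())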
Example #4
    async def _run(self):
        self.logger.debug("Spider Task Start")

        self.proxy = await self.proxy_gener.__anext__()

        self.url_task_queue = Queue(30)

        start_time = datetime.datetime.now()
        tasks = []

        print_log = asyncio.ensure_future(self._auto_print_log())

        self.logger.debug("Create Crawl Tasks")

        crawl_task = asyncio.ensure_future(self._run_crawler(0))

        await self._add_url_to_queue()
        await asyncio.sleep(5)
        while not self.url_task_queue.empty() or self.count != 0:
            await asyncio.sleep(5)
        self.finished = True
        await crawl_task
        self.logger.critical("Simpyder task finished")
        end_time = datetime.datetime.now()
        delta_time = end_time - start_time
        self.logger.critical('Total elapsed time: %s' % str(delta_time))
        self.logger.critical('Total links crawled: %s' % str(self._url_count))
        self.logger.critical('Total items generated: %s' % str(self._item_count))

        await print_log
        await self.session.close()
Example #5
async def test_commit_concurrency(aconn):
    # Check the condition reported in psycopg2#103
    # Because of bad status check, we commit even when a commit is already on
    # its way. We can detect this condition by the warnings.
    notices = Queue()
    aconn.add_notice_handler(
        lambda diag: notices.put_nowait(diag.message_primary))
    stop = False

    async def committer():
        nonlocal stop
        while not stop:
            await aconn.commit()
            await asyncio.sleep(0)  # Allow the other worker to work

    async def runner():
        nonlocal stop
        cur = aconn.cursor()
        for i in range(1000):
            await cur.execute("select %s;", (i, ))
            await aconn.commit()

        # Stop the committer thread
        stop = True

    await asyncio.gather(committer(), runner())
    assert notices.empty(), "%d notices raised" % notices.qsize()
Example #6
class QueueMailBox(MailBox):
    def __init__(self, name, *args, **kwargs):
        super(QueueMailBox, self).__init__(*args, **kwargs)
        self._name = name
        self._queue = Queue()
        self._ready = True

    async def prepare(self):
        self._ready = True

    async def put(self, msg=None):
        if await self.policy():
            self._queue.put_nowait(msg)

    async def size(self):
        return self._queue.qsize()

    async def empty(self):
        return self._queue.empty()

    async def get(self):
        result = None
        result = await self._queue.get()
        return result

    async def policy(self):
        mem_percent = psutil.virtual_memory().percent
        if mem_percent > 80:
            logger.warning("memory usage is gt than 80")
        return True
Example #7
class Common():
    id_queue = Queue()
    request_queue = Queue()
    picture_queue = Queue()
    request_and_parse_queue = Queue()
    CITY_LIST = ['武汉','宜昌','黄石','十堰','襄阳','鄂州','荆州','荆门','黄冈','咸宁','孝感','随州','恩施','神农架','潜江', '天门', '仙桃']
    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'}
Example #8
async def main():

    pictures_queue = Queue()
    workers_count = 300
    connection = {
        'user': '******',  # input your postgres username
        'database': 'your database name',  # input your database name here
        'host': '127.0.0.1',  # change your host if it's not local
        'password': '******'  # input your password for this database
    }
    dsn = 'postgresql://{user}:{password}@{host}/{database}'.format(**connection)

    engine = create_engine(dsn)
    result = engine.execute('''select picture from "your_table_name"''')
    res_list = []
    for row in result:
        clean_jpg = row['picture'].split("\n")
        for i in clean_jpg:
            res_list.append(i)
    print(len(res_list))

    for pic in res_list:
        pictures_queue.put_nowait(pic)

    session = AsyncHTMLSession()

    tasks = []
    for num in range(workers_count):
        task = worker(pictures_queue, num, session)
        tasks.append(task)
    await asyncio.gather(*tasks)
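
The worker coroutine is not shown in this example; a plausible sketch (hypothetical, using the async get() of requests-html's AsyncHTMLSession) could look like this:

async def worker(queue, worker_id, session):
    # Drain the queue of picture URLs and fetch each one with the shared session.
    while not queue.empty():
        url = queue.get_nowait()
        try:
            response = await session.get(url)
            print("worker %d fetched %s -> %s" % (worker_id, url, response.status_code))
        except Exception as exc:
            print("worker %d failed on %s: %s" % (worker_id, url, exc))
        finally:
            queue.task_done()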
Example #9
def sending_loop_clients(websocket):
    # create sending-queue
    loop = asyncio.get_event_loop()
    sending_queue_sensors = Queue()
    logger.info('websockets .... smartHome queue started')

    def changed(tmp):
        loop.call_soon_threadsafe(sending_queue_sensors.put_nowait, tmp)

    try:
        consumers_clients.append(changed)
        logger.info(
            'websockets .... a new smartHome client was added to the queue: %s '
            % changed)

        while True:
            tmp_data = yield from sending_queue_sensors.get()
            yield from websocket.send(tmp_data)
            logger.debug(
                'websockets .... sending JSON data -> smartHome client: %s' %
                tmp_data)

    finally:
        consumers_clients.remove(changed)
        logger.info(
            'websockets .... a smartHome client was removed from the queue: %s '
            % changed)
Example #10
class TaskPool(object):
    def __init__(self, num_workers):
        self.loop = asyncio.get_event_loop()
        self.tasks = Queue(loop=self.loop)
        self.workers = []
        for _ in range(num_workers):
            worker = asyncio.create_task(self.worker())
            self.workers.append(worker)

    async def worker(self):
        while True:
            future, task = await self.tasks.get()
            if task == "TERMINATOR":
                break
            result = await asyncio.wait_for(task, None, loop=self.loop)
            future.set_result(result)

    def submit(self, task):
        future = asyncio.Future(loop=self.loop)
        self.tasks.put_nowait((future, task))
        return future

    async def close(self):
        for _ in self.workers:
            self.tasks.put_nowait((None, "TERMINATOR"))
        await asyncio.gather(*self.workers, loop=self.loop)
Example #11
class ChannelHandler(logging.Handler):
    def __init__(self, bot: discord.Client, cog, cog_name: str,
                 channel: discord.Channel, *args, **kwargs):
        self.cog = cog
        self.cog_name = cog_name
        self.queue = Queue()
        self.channel = channel
        self.bot = bot
        super(ChannelHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        if self.channel:
            self.queue.put_nowait(record)

    async def update_task(self):
        while self.cog == self.bot.get_cog(self.cog_name):
            if not self.channel:
                await asyncio.sleep(1)
            else:
                record = await self.queue.get()
                if self.cog != self.bot.get_cog(self.cog_name):
                    return
                msg = self.format(record)
                await self.bot.send_message(content=msg,
                                            destination=self.channel)
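
A wiring sketch for the handler above (it relies on the same legacy discord.py API the example uses, i.e. discord.Channel and bot.send_message; the logger name, cog, and log_channel are illustrative):

handler = ChannelHandler(bot, cog, "MyCog", log_channel, level=logging.INFO)
logging.getLogger("my_bot").addHandler(handler)
bot.loop.create_task(handler.update_task())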
Example #12
 def __init__(self, num_workers):
     self.loop = asyncio.get_event_loop()
     self.tasks = Queue(loop=self.loop)
     self.workers = []
     for _ in range(num_workers):
         worker = asyncio.create_task(self.worker())
         self.workers.append(worker)
Example #13
    def __init__(self, timeout=10, loop=None):
        self.timeout = timeout
        super().__init__(asyncio.StreamReader(), self.client_connected, loop)

        self.close_code = None
        self.close_reason = ''

        # Futures tracking steps in the connection's lifecycle.
        self.opening_handshake = asyncio.Future()
        self.closing_handshake = asyncio.Future()
        self.connection_closed = asyncio.Future()

        # Queue of received messages.
        self.messages = Queue()

        # Mapping of ping IDs to waiters, in chronological order.
        self.pings = collections.OrderedDict()

        # Task managing the connection.
        self.worker = asyncio.ensure_future(self.run())

        # In a subclass implementing the opening handshake, the state will be
        # CONNECTING at this point.
        if self.state == 'OPEN':
            self.opening_handshake.set_result(True)
Example #14
class Database:
    def __init__(self, database, loop=None):
        self.database = database
        self.loop = loop if loop else asyncio.get_event_loop()
        self.connection_semaphore = Queue(maxsize=1)
        self.connection_semaphore.put_nowait({})
        self.__connect_ref = None

    async def __acquire_connection(self):
        await self.connection_semaphore.get()
        return sqlite3.connect(self.database, check_same_thread=False)

    async def __call__(self, *args, **kwargs):
        db = await self.__acquire_connection()
        return Connection(db, self)

    async def __aenter__(self):
        db = await self.__acquire_connection()
        conn = Connection(db, self)
        self.__connect_ref = conn
        return conn

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.__connect_ref:
            await self.__connect_ref.close()
Example #15
 def __init__(self, loop, num_workers):
     self.loop = loop
     self.tasks = Queue(loop=self.loop)
     self.workers = []
     for _ in range(num_workers):
         worker = asyncio.ensure_future(self.worker(), loop=self.loop)
         self.workers.append(worker)
Example #16
 async def consumer_log_response(self, res_queue: queues.Queue):
     while True:
         print(res_queue.qsize())
         item: requests.Response = await res_queue.get()
         if item is None:
             break
         print(item.url)
         res_queue.task_done()
Example #17
 def __init__(self, bot: discord.Client, cog, cog_name: str,
              channel: discord.Channel, *args, **kwargs):
     self.cog = cog
     self.cog_name = cog_name
     self.queue = Queue()
     self.channel = channel
     self.bot = bot
     super(ChannelHandler, self).__init__(*args, **kwargs)
Example #18
 def __init__(self, limit: int = 50):
     self._loop = asyncio.get_event_loop()
     self._limit = limit
     self.logger = logging.getLogger(self.__class__.__name__)
     self._pending_queue: Queue = Queue()
     self._done_queue: Queue = Queue()
     self._running_count = 0
     self._closed = False
Example #19
class IRCClientProtocol(asyncio.Protocol):
    """Low-level protocol that speaks the client end of IRC.

    This isn't responsible for very much besides the barest minimum definition
    of an IRC client: connecting and responding to PING.

    You probably want `read_message`, or the higher-level client class.
    """

    def __init__(self, loop, nick, password, charset="utf8"):
        self.nick = nick
        self.password = password
        self.charset = charset

        self.buf = b""
        self.message_queue = Queue(loop=loop)
        self.registered = False

    def connection_made(self, transport):
        self.transport = transport
        if self.password:
            self.send_message("PASS", self.password)
        self.send_message("NICK", self.nick)
        self.send_message("USER", "dywypi", "-", "-", "dywypi Python IRC bot")

    def data_received(self, data):
        data = self.buf + data
        while True:
            raw_message, delim, data = data.partition(b"\r\n")
            if not delim:
                # Incomplete message; stop here and wait for more
                self.buf = raw_message
                return

            # TODO valerr
            message = IRCMessage.parse(raw_message.decode(self.charset))
            logger.debug("recv: %r", message)
            self.handle_message(message)

    def handle_message(self, message):
        if message.command == "PING":
            self.send_message("PONG", message.args[-1])

        elif message.command == "RPL_WELCOME":
            # 001, first thing sent after registration
            if not self.registered:
                self.registered = True

        self.message_queue.put_nowait(message)

    def send_message(self, command, *args):
        message = IRCMessage(command, *args)
        logger.debug("sent: %r", message)
        self.transport.write(message.render().encode(self.charset) + b"\r\n")

    @asyncio.coroutine
    def read_message(self):
        return (yield from self.message_queue.get())
Example #20
    def __init__(self, id):
        super(Program, self).__init__()

        self.id = id
        self.other_program = None
        self.set_register('p', self.id)
        self.queue = Queue()
        self.values_sent = 0
        self.waiting = False
Example #21
 async def _serve(self, websocket, path):
     import websockets
     connection_id = await websocket.recv()
     with self.queue_lock:
         #if connection_id in self._preclosed_connections:
         #    self._preclosed_connections[connection_id].set()
         #    return
         pmqueue = self._pending_message_queues.get(connection_id, None)
         myqueue = Queue()
         if pmqueue is not None:
             events = []
             while 1:
                 try:
                     e = pmqueue.get_nowait()
                 except QueueEmpty:
                     break
                 events.append(e)
             if len(events) > self.CACHE_EVENTS_FIRST + self.CACHE_EVENTS_LAST:
                 events = events[:self.CACHE_EVENTS_FIRST] + \
                     events[-self.CACHE_EVENTS_LAST:]
             for enr, e in enumerate(events):
                 swallow = False
                 if e.get("type", None) == "var":
                     varname = e.get("var", None)
                     if varname is not None:
                         for e2 in events[enr+1:]:
                             if e2.get("type", None) != "var":
                                 continue
                             if e2.get("var", None) != varname:
                                 continue
                             swallow = True
                             break
                 if swallow:
                     continue
                 myqueue.put_nowait(e)
                 pmqueue.put_nowait(e) #put the events back
         if connection_id not in self._message_queue_lists:
             self._message_queue_lists[connection_id] = []
         self._message_queue_lists[connection_id].append(myqueue)
     while True:
         #print("WAIT")
         try:
             message = await myqueue.get()
         except Exception:
             break
         message = json.dumps(message)
         #print("SEND?", message)
         if message is None: #terminating message
             break
         try:
             #print("SEND", message)
             await websocket.send(message)
         except websockets.exceptions.ConnectionClosed:
             break
     with self.queue_lock:
         self._message_queue_lists[connection_id].remove(myqueue)
Example #22
    def __init__(self, request_queue_max_size: int=1000, concurrent_number: int=1, delay: int=0):
        self.http_client = AsyncHTTPClient()

        self.request_queue = Queue(request_queue_max_size)  # request queue
        self.concurrent_number = concurrent_number  # maximum number of concurrent requests
        self.delay = delay  # delay between batches of requests
        self.is_running = False  # tracks whether the collector is running
        self.pushed_request_count = 0  # number of requests pushed onto the queue
        self.failed_request_count = 0  # number of failed requests
        self.finished_request_count = 0  # total number of completed requests
Example #23
 async def consumer(self, queue: Queue, session: ClientSession):
     while True:
         action: AiohttpAction = await queue.get()
         try:
             self.task_count += 1
             await action.do_action(session, queue)
         except Exception as ex:
             logger.exception("Queue worker %s caught an exception from %r",
                              self.uid, action)
         queue.task_done()
Example #24
 def __init__(self, tag="data", maxsize=1, name=None, loop=None):
     loop = loop if loop is not None else asyncio.get_event_loop()
     self.loop = loop
     self.name = name if name is not None else str(uuid1())
     self._queue = Queue(maxsize, loop=self.loop)
     self.default_value = None
     self.default_value_set = False
     self.connected = False
     self.belong_to_block = None
     self.data_tag = tag
Example #25
 async def producer_get_site(self, site_queue: queues.Queue,
                             response_queue: queues.Queue):
     while True:
         site: str = await site_queue.get()
         if site is None:
             break
         res = requests.get(site)
         print(res.status_code)
         await response_queue.put(res)
         site_queue.task_done()
     await response_queue.put(None)
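
Together with the consumer_log_response coroutine from the earlier example, this producer can be wired up roughly as follows (a sketch; the site list and the enclosing method name are illustrative):

async def run_pipeline(self):
    site_queue = asyncio.Queue()
    response_queue = asyncio.Queue()
    for site in ["https://example.com", "https://example.org"]:
        await site_queue.put(site)
    await site_queue.put(None)  # sentinel: tells the producer to stop
    await asyncio.gather(
        self.producer_get_site(site_queue, response_queue),
        self.consumer_log_response(response_queue),
    )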
Example #26
    async def start(self):
        """ Entry point. """
        # The queue must be created inside the running coroutine; see https://stackoverflow.com/questions/53724665/using-queues-results-in-asyncio-exception-got-future-future-pending-attached
        self.targets_q = Queue()  # url, name
        await self.targets_q.put((self.url, "index"))
        self.running = True

        tasks = []
        for i in range(self.task_count):
            tasks.append(asyncio.create_task(self.dump()))
        for t in tasks:
            await t
Example #27
    def __init__(self, crawler_manager: CrawlerManager,
                 downloader_manager: DownloaderManager):
        self.crawler_manager = crawler_manager
        self.downloader_manager = downloader_manager

        self.running_crawler = {}

        self.start_time = int(time.time())

        self.schedule_queue = Queue()

        self.QUIT = False
Example #28
class BackendQueue:
    def start(self, loop: asyncio.AbstractEventLoop):
        self.queue = Queue()
        loop.create_task(self.writer())

    async def writer(self):
        raise NotImplementedError

    @asynccontextmanager
    async def read_queue(self):
        update = await self.queue.get()
        yield update
        self.queue.task_done()
Example #29
 def start(self, loop: asyncio.AbstractEventLoop, multiprocess=False):
     if hasattr(self, 'started') and self.started:
         # prevent a backend callback from starting more than 1 writer and creating more than 1 queue
         return
     self.multiprocess = multiprocess
     if self.multiprocess:
         self.queue = Pipe(duplex=False)
         self.worker = Process(target=BackendQueue.worker, args=(self.writer,), daemon=True)
         self.worker.start()
     else:
         self.queue = Queue()
         self.worker = loop.create_task(self.writer())
     self.started = True
Example #30
    def __init__(self, cloud: Cloud):
        """Initialize Google Report State."""
        super().__init__(cloud)
        self._connect_lock = asyncio.Lock()
        self._to_send = Queue(100)
        self._message_sender_task = None
        # Local code waiting for a response
        self._response_handler: Dict[str, asyncio.Future] = {}
        self.register_on_connect(self._async_on_connect)
        self.register_on_disconnect(self._async_on_disconnect)

        # Register start/stop
        cloud.register_on_stop(self.disconnect)
Example #31
async def queued_worker_wrapper(
    coroutine_function: Callable[[aiohttp.ClientSession, str, str], Awaitable],
    session: aiohttp.ClientSession,
    queue: Queue,
) -> None:
    while True:
        print("Getting item from queue")
        url, filename = await queue.get()
        print(f"Got {url}, {filename} from queue")
        print(f"Running coroutine for {url}, {filename}")
        await coroutine_function(session, url, filename)
        print(f"Coroutine finished for {url}, {filename}")
        print("Letting queue know that the task is done")
        queue.task_done()
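
A sketch of driving the wrapper above, reusing the aiohttp and asyncio.Queue imports it already assumes: fill the queue, start a few wrapped workers, wait for the queue to drain, then cancel the now-idle workers (download_file and the URL/filename pairs are illustrative):

async def main():
    async with aiohttp.ClientSession() as session:
        queue: Queue = Queue()
        for url, filename in [("https://example.com/a.jpg", "a.jpg"),
                              ("https://example.com/b.jpg", "b.jpg")]:
            queue.put_nowait((url, filename))
        workers = [
            asyncio.create_task(queued_worker_wrapper(download_file, session, queue))
            for _ in range(2)
        ]
        await queue.join()  # every queued item has been marked task_done()
        for w in workers:
            w.cancel()
        await asyncio.gather(*workers, return_exceptions=True)

asyncio.run(main())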
Example #32
    def __init__(self,
                 config: Config,
                 client,
                 mode="gui",
                 weight_table=0,
                 c=10,
                 mc=False):
        """
        :param config:
        :param agent.model.OthelloModel|None model:
        :param TreeNode mtcs_info:
        :parameter OthelloModelAPI api:
        """
        self.config = config
        self.client = client
        self.mode = mode
        self.play_config = self.config.play
        self.weight_table = weight_table
        self.c = c
        self.mc = mc

        # mc_tree
        self.num_tree, self.win_tree, self.policy_tree = createTrees()

        # expanded
        self.expanded = set()  # set of expanded positions p (dict form)
        self.now_expanding = set()

        # threads
        self.prediction_queue = Queue(
            self.play_config.prediction_queue_size)  # size of the queue for parallel prediction requests
        self.sem = asyncio.Semaphore(
            self.play_config.parallel_search_num)  # limits the number of parallel search coroutines
        self.loop = asyncio.get_event_loop()

        # for gui
        if self.mode == 'gui':
            self.thinking_history = None  # for fun
            self.avalable = None
            self.allow_resign = False
        elif self.mode == 'self_play':
            self.moves = []
            self.allow_resign = True
        self.test_mode = False
        # params
        self.running_simulation_num = 0

        # solver
        self.solver = OthelloSolver()  # minimax tree solver
Example #33
class Core(object):

    def __init__(self, bot):
        self.bot = bot
        self.timeout = int(self.bot.config.get('timeout'))
        self.ping_queue = Queue(loop=bot.loop)

    def connection_made(self):
        self.bot.loop.call_later(self.timeout, self.check_ping)
        self.ping_queue.put_nowait(self.bot.loop.time())

    def check_ping(self):  # pragma: no cover
        # check if we received a ping
        # reconnect if queue is empty
        self.bot.log.debug(
            'Ping queue size: {}'.format(self.ping_queue.qsize()))
        if self.ping_queue.empty():
            self.bot.loop.call_soon(self.bot.protocol.transport.close)
        else:
            self.bot.loop.call_later(self.timeout, self.check_ping)
        while not self.ping_queue.empty():
            self.ping_queue.get_nowait()

    @event(rfc.PING)
    def pong(self, data):
        """PING reply"""
        self.ping_queue.put_nowait(self.bot.loop.time())
        self.bot.send('PONG ' + data)

    @event(rfc.NEW_NICK)
    def recompile(self, nick=None, new_nick=None, **kw):
        """recompile regexp on new nick"""
        if self.bot.nick == nick.nick:
            self.bot.config['nick'] = new_nick
            self.bot.recompile()

    @event(rfc.ERR_NICK)
    def badnick(self, me=None, nick=None, **kw):
        """Use alt nick on nick error"""
        if me == '*':
            self.bot.set_nick(self.bot.nick + '_')
        self.bot.log.debug('Trying to regain nickname in 30s...')
        self.bot.loop.call_later(30, self.bot.set_nick, self.bot.original_nick)

    @event(rfc.RPL_ENDOFMOTD)
    def autojoin(self, **kw):
        """autojoin at the end of MOTD"""
        self.bot.config['nick'] = kw['me']
        self.bot.recompile()
        channels = utils.as_list(self.bot.config.get('autojoins', []))
        for channel in channels:
            channel = utils.as_channel(channel)
            self.bot.log.info('Trying to join %s', channel)
            self.bot.join(channel)
Example #34
    def __init__(self, *,
                 host=None, port=None, secure=None, timeout=10, max_size=2 ** 20, loop=None):
        self.host = host
        self.port = port
        self.secure = secure

        self.timeout = timeout
        self.max_size = max_size

        super().__init__(asyncio.StreamReader(), self.client_connected, loop)

        self.close_code = None
        self.close_reason = ''

        # Futures tracking steps in the connection's lifecycle.
        self.opening_handshake = asyncio.Future()
        self.closing_handshake = asyncio.Future()
        self.connection_failed = asyncio.Future()
        self.connection_closed = asyncio.Future()

        # Queue of received messages.
        self.messages = Queue()

        # Mapping of ping IDs to waiters, in chronological order.
        self.pings = collections.OrderedDict()

        # Task managing the connection.
        self.worker = asyncio.ensure_future(self.run())

        # In a subclass implementing the opening handshake, the state will be
        # CONNECTING at this point.
        if self.state == 'OPEN':
            self.opening_handshake.set_result(True)
Example #35
    def __init__(self, loop, network):
        self.loop = loop
        self.network = network

        self.joined_channels = {}  # name => Channel

        # IRC server features, as reported by ISUPPORT, with defaults taken
        # from the RFC.
        self.len_nick = 9
        self.len_channel = 200
        self.len_message = 510
        # These lengths don't have limits mentioned in the RFC, so going with
        # the smallest known values in the wild
        self.len_kick = 80
        self.len_topic = 80
        self.len_away = 160
        self.max_watches = 0
        self.max_targets = 1
        self.channel_types = set('#&')
        self.channel_modes = {}  # TODO, haha.
        self.channel_prefixes = {}  # TODO here too.  IRCMode is awkward.
        self.network_title = self.network.name
        self.features = {}

        # Various intermediate state used for waiting for replies and
        # aggregating multi-part replies
        # TODO hmmm so what happens if state just gets left here forever?  do
        # we care?
        self._pending_names = {}
        self._names_futures = {}
        self._pending_topics = {}
        self._join_futures = {}

        self.event_queue = Queue(loop=loop)
Example #36
    def __init__(self, loop, nick, password, charset="utf8"):
        self.nick = nick
        self.password = password
        self.charset = charset

        self.buf = b""
        self.message_queue = Queue(loop=loop)
        self.registered = False
Example #37
 def __init__(self, sr, sw, *, loop=None):
     if not loop:
         loop = asyncio.get_event_loop()
     self._loop = loop
     self._sr = sr
     self._sw = sw
     self._msgs = Queue(loop=loop)
     self._worker = loop.create_task(self._run())
Example #38
class IrcConnection(asyncio.Protocol):
    """asyncio protocol to handle an irc connection"""

    def connection_made(self, transport):
        self.transport = transport
        self.closed = False
        self.queue = Queue()

    def data_received(self, data):
        encoding = getattr(self, 'encoding', 'ascii')
        data = data.decode(encoding, 'ignore')
        if not self.queue.empty():
            data = self.queue.get_nowait() + data
        lines = data.split('\r\n')
        self.queue.put_nowait(lines.pop(-1))
        for line in lines:
            self.factory.dispatch(line)

    def write(self, data):
        if data is not None:
            if isinstance(data, text_type):
                data = data.encode(self.encoding)
            if not data.endswith(b'\r\n'):
                data = data + b'\r\n'
            self.transport.write(data)

    def connection_lost(self, exc):  # pragma: no cover
        self.factory.log.critical('connection lost (%s): %r',
                                  id(self.transport),
                                  exc)
        self.factory.notify('connection_lost')
        if not self.closed:
            self.close()
            # wait a few before reconnect
            self.factory.loop.call_later(
                2, self.factory.create_connection, self.__class__)

    def close(self):  # pragma: no cover
        if not self.closed:
            self.factory.log.critical('closing old transport (%r)',
                                      id(self.transport))
            try:
                self.transport.close()
            finally:
                self.closed = True
Example #39
    def __init__(self, loop, network, *args, **kwargs):
        super().__init__(loop, **kwargs)

        self.event_queue = Queue(loop=self.loop)

        self.network = network

        self.me = Peer('dywypi', 'dywypi', 'localhost')
        self.you = Peer('user', 'user', 'localhost')
Example #40
    def __init__(self, loop, network):
        self.loop = loop

        # TODO it would be nice to parametrize these (or even accept arbitrary
        # transports), but the event loop doesn't support async reading from
        # ttys for some reason...
        self.stdin = sys.stdin
        self.stdout = sys.stdout

        self.event_queue = Queue(loop=loop)
Example #41
def sending_loop_gui(websocket):
    # create sending-queue
    loop = asyncio.get_event_loop()
    sending_queue_gui = Queue()
    logger.info('websockets .... GUI queue started')

    def changed(tmp):
        loop.call_soon_threadsafe(sending_queue_gui.put_nowait, tmp)

    try:
        consumers_gui.append(changed)
        logger.info('websockets .... a GUI client was added to the queue')

        while True:
            tmp_data = yield from sending_queue_gui.get()
            yield from websocket.send(tmp_data)
            logger.debug('websockets .... sending JSON data -> GUI: %s' % tmp_data)

    finally:
        consumers_gui.remove(changed)
        logger.info('websockets .... a GUI client was removed from the queue')
Example #42
def sending_loop(websocket):
    # create sending-queue
    loop = asyncio.get_event_loop()
    sending_queue = Queue()
    logger.info("websockets .... queue started")

    def changed(tmp):
        loop.call_soon_threadsafe(sending_queue.put_nowait, tmp)

    try:
        consumers.append(changed)
        logger.info("websockets .... consumers.append")

        while True:
            tmp_data = yield from sending_queue.get()
            yield from websocket.send(tmp_data)
            logger.debug("websockets .... yield from websocket.send : %s" % tmp_data)

    finally:
        consumers.remove(changed)
        logger.info("websockets .... consumers.remove")
Example #43
    async def _handler(self, websocket, path):
        print('[WebSocketThread] Incoming connection')
        queue = Queue()

        async def send_message_async(message):
            await queue.put(message)

        def send_message(message):
            asyncio.run_coroutine_threadsafe(send_message_async(message), self._loop)

        def close():
            send_message(None)

        on_open, on_message, on_close = self._accept_connection(send_message, close)

        on_open()
        listener_task = asyncio.ensure_future(websocket.recv())
        producer_task = asyncio.ensure_future(queue.get())
        try:
            while True:
                done, pending = await asyncio.wait(
                    [listener_task, producer_task],
                    return_when=asyncio.FIRST_COMPLETED)

                if listener_task in done:
                    message = listener_task.result()
                    on_message(message)
                    listener_task = asyncio.ensure_future(websocket.recv())

                if producer_task in done:
                    message = producer_task.result()
                    if message is None:
                        break
                    producer_task = asyncio.ensure_future(queue.get())
                    await websocket.send(message)
        finally:
            listener_task.cancel()
            producer_task.cancel()
            on_close()
            print('[WebSocketThread] Connection closed')
Example #44
    def register(self, channel):
        """
        Handler usage:
        >>> channel = yield from dispatcher.register('foo')
        >>> yield from channel.get()
        >>> channel.close()
        """
        q = Queue()
        self.queues[channel].append(q)

        yield from self.subscription.subscribe([channel])

        def free(*a, **k):
            """
            Stop serving client
            """
            queues = self.queues[channel]
            queues.remove(q)
            if not queues:
                yield from self.subscription.unsubscribe([channel])
                del self.queues[channel]
                logger.info('%s channel was released', channel)
        q.close = free
        return q
Example #45
class ShellClient:
    def __init__(self, loop, network):
        self.loop = loop

        # TODO it would be nice to parametrize these (or even accept arbitrary
        # transports), but the event loop doesn't support async reading from
        # ttys for some reason...
        self.stdin = sys.stdin
        self.stdout = sys.stdout

        self.event_queue = Queue(loop=loop)

    @asyncio.coroutine
    def connect(self):
        self.protocol = UrwidTerminalProtocol(DywypiShell, self.loop)
        self.transport = TrivialFileTransport(self.loop, self.stdin, self.stdout, self.protocol)

    @asyncio.coroutine
    def disconnect(self):
        self.protocol.bridge.stop()

    @asyncio.coroutine
    def read_event(self):
        # For now, this will never ever do anything.
        # TODO this sure looks a lot like IRCClient
        return (yield from self.event_queue.get())

    def format_transition(self, current_style, new_style):
        if new_style == Style.default():
            # Just use the reset sequence
            return "\x1b[0m"

        ret = ""
        if new_style.fg != current_style.fg:
            ret += FOREGROUND_CODES[new_style.fg]

        if new_style.bold != current_style.bold:
            ret += BOLD_CODES[new_style.bold]

        return ret
Example #46
    def __init__(self, timeout=10):
        self.timeout = timeout

        self.close_code = None
        self.close_reason = ''

        # Futures tracking steps in the connection's lifecycle.
        self.opening_handshake = asyncio.Future()
        self.closing_handshake = asyncio.Future()
        self.connection_closed = asyncio.Future()

        # Queue of received messages.
        self.messages = Queue()

        # Mapping of ping IDs to waiters, in chronological order.
        self.pings = collections.OrderedDict()

        # Task managing the connection.
        self.worker = asyncio.ensure_future(self.run())

        # In a subclass implementing the opening handshake, the state will be
        # CONNECTING at this point.
        if self.state == 'OPEN':
            self.opening_handshake.set_result(True)
Example #47
 def connection_made(self, transport):
     self.transport = transport
     self.queue = Queue(loop=self.loop)
     self.loop.create_task(handle_requests(self.queue, transport))
Example #48
    class HttpProtocol(asyncio.Protocol):
        def __init__(self, loop, handler):
            self.parser = cparser.HttpRequestParser(
                self.on_headers, self.on_body, self.on_error)
            self.loop = loop
            self.response = Response()

        if flavor == 'queue':
            def connection_made(self, transport):
                self.transport = transport
                self.queue = Queue(loop=self.loop)
                self.loop.create_task(handle_requests(self.queue, transport))
        else:
            def connection_made(self, transport):
                self.transport = transport

        def connection_lost(self, exc):
            self.parser.feed_disconnect()

        def data_received(self, data):
            self.parser.feed(data)

        def on_headers(self, request):
            return

        if flavor == 'block':
            def on_body(self, request):
                handle_request_block(request, self.transport, self.response)
        elif flavor == 'dump':
            def on_body(self, request):
                handle_dump(request, self.transport, self.response)
        elif flavor == 'task':
            def on_body(self, request):
                self.loop.create_task(handle_request(request, self.transport))
        elif flavor == 'queue':
            def on_body(self, request):
                self.queue.put_nowait(request)
        elif flavor == 'inline':
            def on_body(self, request):
                body = 'Hello inline!'
                status_code = 200
                mime_type = 'text/plain'
                encoding = 'utf-8'
                text = [b'HTTP/1.1 ']
                text.extend([str(status_code).encode(), b' OK\r\n'])
                text.append(b'Connection: keep-alive\r\n')
                text.append(b'Content-Length: ')
                text.extend([str(len(body)).encode(), b'\r\n'])
                text.extend([
                    b'Content-Type: ', mime_type.encode(),
                    b'; encoding=', encoding.encode(), b'\r\n\r\n'])
                text.append(body.encode())

                self.transport.write(b''.join(text))

        elif flavor == 'static':
            def on_body(self, request):
                self.transport.write(static_response)

        def on_error(self, error):
            print(error)
Example #49
class IRCClient:
    """Higher-level IRC client.  Takes care of most of the hard parts of IRC:
    incoming server messages are bundled into more intelligible events (see
    ``dywypi.event``), and commands that expect replies are implemented as
    coroutines.
    """

    def __init__(self, loop, network):
        self.loop = loop
        self.network = network

        self.joined_channels = {}  # name => Channel

        # IRC server features, as reported by ISUPPORT, with defaults taken
        # from the RFC.
        self.len_nick = 9
        self.len_channel = 200
        self.len_message = 510
        # These lengths don't have limits mentioned in the RFC, so going with
        # the smallest known values in the wild
        self.len_kick = 80
        self.len_topic = 80
        self.len_away = 160
        self.max_watches = 0
        self.max_targets = 1
        self.channel_types = set('#&')
        self.channel_modes = {}  # TODO, haha.
        self.channel_prefixes = {}  # TODO here too.  IRCMode is awkward.
        self.network_title = self.network.name
        self.features = {}

        # Various intermediate state used for waiting for replies and
        # aggregating multi-part replies
        # TODO hmmm so what happens if state just gets left here forever?  do
        # we care?
        self._pending_names = {}
        self._names_futures = {}
        self._pending_topics = {}
        self._join_futures = {}

        self.event_queue = Queue(loop=loop)

    def get_channel(self, channel_name):
        """Returns a `Channel` object containing everything the client
        definitively knows about the given channel.

        Note that if you, say, ask for the topic of a channel you aren't in and
        then immediately call `get_channel`, the returned object won't have its
        topic populated.  State is only tracked persistently for channels the
        bot is in; otherwise there's no way to know whether or not it's stale.
        """
        if channel_name in self.joined_channels:
            return self.joined_channels[channel_name]
        else:
            return IRCChannel(self, channel_name)

    @asyncio.coroutine
    def connect(self):
        """Coroutine for connecting to a single server.

        Note that this will nonblock until the client is "registered", defined
        as the first PING/PONG exchange.
        """
        # TODO this is a poor excuse for round-robin  :)
        server = self.current_server = self.network.servers[0]

        # TODO i'm pretty sure the server tells us what our nick is, and we
        # should believe that instead
        self.nick = self.network.preferred_nick

        # TODO: handle disconnection, somehow.  probably affects a lot of
        # things.
        # TODO kind of wish this weren't here, since the creation of the
        # connection isn't inherently part of a client.  really it should be on
        # the...  network, perhaps?  and there's no reason i shouldn't be able
        # to "connect" to a unix socket or pipe or anywhere else that has data.
        _, self.proto = yield from self.loop.create_connection(
            lambda: IRCClientProtocol(
                self.loop, self.network.preferred_nick, password=server.password),
            server.host, server.port, ssl=server.tls)

        while True:
            yield from self._read_message()
            # TODO this is dumb garbage; more likely this client itself should
            # just wait for 001/RPL_WELCOME.
            if self.proto.registered:
                break

        # Start the event loop as soon as we've synched, or we can't respond to
        # anything
        asyncio.ensure_future(self._advance(), loop=self.loop)

        # Initial joins
        yield from asyncio.gather(*[
            self.join(channel_name)
            for channel_name in self.network.autojoins
        ], loop=self.loop)

    @asyncio.coroutine
    def disconnect(self):
        self.proto.send_message('QUIT', 'Seeya!')
        self.proto.transport.close()

    @asyncio.coroutine
    def _advance(self):
        """Internal coroutine that just keeps the protocol message queue going.
        Called once after a connect and should never be called again after
        that.
        """
        # TODO this is currently just to keep the message queue going, but
        # eventually it should turn them into events and stuff them in an event
        # queue
        yield from self._read_message()

        asyncio.ensure_future(self._advance(), loop=self.loop)

    @asyncio.coroutine
    def _read_message(self):
        """Internal dispatcher for messages received from the protocol."""
        message = yield from self.proto.read_message()

        # TODO there is a general ongoing problem here with matching up
        # responses.  ESPECIALLY when error codes are possible.  something here
        # is gonna have to get a bit fancier.  maybe it should live at the
        # protocol level, actually...?

        # Boy do I ever hate this pattern but it's slightly more maintainable
        # than a 500-line if tree.
        handler = getattr(self, '_handle_' + message.command, None)
        if handler:
            handler(message)

    def _handle_RPL_ISUPPORT(self, message):
        me, *features, human_text = message.args
        for feature_string in features:
            feature, _, value = feature_string.partition('=')
            if value is None:
                value = True

            self.features[feature] = value

            if feature == 'NICKLEN':
                self.len_nick = int(value)
            elif feature == 'CHANNELLEN':
                self.len_channel = int(value)
            elif feature == 'KICKLEN':
                self.len_kick = int(value)
            elif feature == 'TOPICLEN':
                self.len_topic = int(value)
            elif feature == 'AWAYLEN':
                self.len_away = int(value)
            elif feature == 'WATCH':
                self.max_watches = int(value)
            elif feature == 'CHANTYPES':
                self.channel_types = set(value)
            elif feature == 'PREFIX':
                # List of channel user modes, in relative priority order, in
                # the format (ov)@+
                assert value[0] == '('
                letters, symbols = value[1:].split(')')
                assert len(letters) == len(symbols)
                self.channel_prefixes.clear()
                for letter, symbol in zip(letters, symbols):
                    mode = IRCMode(letter, prefix=symbol)
                    self.channel_modes[letter] = mode
                    self.channel_prefixes[symbol] = mode
            elif feature == 'MAXTARGETS':
                self.max_targets = int(value)
            elif feature == 'CHANMODES':
                # Four groups delimited by lists: list-style (+b), arg required
                # (+k), arg required only to set (+l), argless
                lists, args, argsets, argless = value.split(',')
                for letter in lists:
                    self.channel_modes[letter] = IRCMode(
                        letter, multi=True)
                for letter in args:
                    self.channel_modes[letter] = IRCMode(
                        letter, arg_on_set=True, arg_on_remove=True)
                for letter in argsets:
                    self.channel_modes[letter] = IRCMode(
                        letter, arg_on_set=True)
                for letter in argless:
                    self.channel_modes[letter] = IRCMode(letter)
            elif feature == 'NETWORK':
                self.network_title = value

    def _handle_JOIN(self, message):
        channel_name, = message.args
        joiner = Peer.from_prefix(message.prefix)
        # TODO should there be a self.me?  how...
        if joiner.name == self.nick:
            # We just joined a channel
            #assert channel_name not in self.joined_channels
            # TODO key?  do we care?
            # TODO what about channel configuration and anon non-joined
            # channels?  how do these all relate...
            channel = IRCChannel(self, channel_name)
            self.joined_channels[channel.name] = channel
        else:
            # Someone else just joined the channel
            self.joined_channels[channel_name].add_user(joiner)

    def _handle_RPL_TOPIC(self, message):
        # Topic.  Sent when joining or when requesting the topic.
        # TODO this doesn't handle the "requesting" part
        # TODO what if me != me?
        me, channel_name, topic_text = message.args
        self._pending_topics[channel_name] = IRCTopic(topic_text)

    def _handle_RPL_TOPICWHOTIME(self, message):
        # Topic author (NONSTANDARD).  Sent after RPL_TOPIC.
        # Unfortunately, there's no way to know whether to expect this.
        # TODO this doesn't handle the "requesting" part
        # TODO what if me != me?
        me, channel_name, author, timestamp = message.args
        topic = self._pending_topics.setdefault(channel_name, IRCTopic(''))
        topic.author = Peer.from_prefix(author)
        topic.timestamp = datetime.utcfromtimestamp(int(timestamp))

    def _handle_RPL_NAMREPLY(self, message):
        # Names response.  Sent when joining or when requesting a names
        # list.  Must be ended with a RPL_ENDOFNAMES.
        me, useless_equals_sign, channel_name, *raw_names = message.args
        # List of names is actually optional (?!)
        if raw_names:
            raw_names = raw_names[0]
        else:
            raw_names = ''

        names = raw_names.strip(' ').split(' ')
        namelist = self._pending_names.setdefault(channel_name, [])
        # TODO modes?  should those be stripped off here?
        # TODO for that matter should these become peers here?
        namelist.extend(names)

    def _handle_RPL_ENDOFNAMES(self, message):
        # End of names list.  Sent at the very end of a join or the very
        # end of a NAMES request.
        me, channel_name, info = message.args
        namelist = self._pending_names.pop(channel_name, [])

        if channel_name in self._names_futures:
            # TODO we should probably not ever have a names future AND a
            # pending join at the same time.  or, does it matter?
            self._names_futures[channel_name].set_result(namelist)
            del self._names_futures[channel_name]

        if channel_name in self.joined_channels:
            # Join synchronized!
            channel = self.joined_channels[channel_name]
            channel.sync = True

            channel.topic = self._pending_topics.pop(channel_name, None)

            for name in namelist:
                modes = set()
                # TODO use features!
                while name and name[0] in '+%@&~':
                    modes.add(name[0])
                    name = name[1:]

                # TODO haha no this is so bad.
                # TODO the bot should, obviously, keep a record of all
                # known users as well.  alas, mutable everything.
                peer = Peer(name, None, None)

                channel.add_user(peer, modes)

            if channel_name in self._join_futures:
                # Update the Future
                self._join_futures[channel_name].set_result(channel)
                del self._join_futures[channel_name]

    def _handle_PRIVMSG(self, message):
        event = Message(self, message)
        self.event_queue.put_nowait(event)

    @asyncio.coroutine
    def read_event(self):
        """Produce a single IRC event.

        This client does not do any kind of multiplexing or event handler
        notification; that's left to a higher level.
        """
        return (yield from self.event_queue.get())


    # Implementations of particular commands

    # TODO should this be part of the general client interface, or should there
    # be a separate thing that smooths out the details?
    @asyncio.coroutine
    def say(self, target, message):
        """Coroutine that sends a message to a target, which may be either a
        `Channel` or a `Peer`.
        """
        yield from self.send_message('PRIVMSG', target, message)

    def join(self, channel_name, key=None):
        """Coroutine that joins a channel, and nonblocks until the join is
        "synchronized" (defined as receiving the nick list).
        """
        if channel_name in self._join_futures:
            return self._join_futures[channel_name]

        # TODO multiple?  error on commas?
        if key is None:
            self.proto.send_message('JOIN', channel_name)
        else:
            self.proto.send_message('JOIN', channel_name, key)

        # Clear out any lingering names list
        self._pending_names[channel_name] = []

        # Return a Future, to be populated by the message loop
        fut = self._join_futures[channel_name] = asyncio.Future()
        return fut

    def names(self, channel_name):
        """Coroutine that returns a list of names in a channel."""
        self.proto.send_message('NAMES', channel_name)

        # No need to do the same thing twice
        if channel_name in self._names_futures:
            return self._names_futures[channel_name]

        # Clear out any lingering names list
        self._pending_names[channel_name] = []

        # Return a Future, to be populated by the message loop
        fut = self._names_futures[channel_name] = asyncio.Future()
        return fut

    def set_topic(self, channel, topic):
        """Sets the channel topic."""
        self.proto.send_message('TOPIC', channel, topic)

    @asyncio.coroutine
    def send_message(self, command, *args):
        self.proto.send_message(command, *args)

    def format_transition(self, current_style, new_style):
        if new_style == Style.default():
            # Reset code, ^O
            return '\x0f'

        if new_style.fg != current_style.fg and new_style.fg is Color.default:
            # IRC has no "reset to default" code.  mIRC claims color 99 is for
            # this, but it lies, at least in irssi.  So we must reset and
            # reapply everything.
            ret = '\x0f'
            if new_style.bold is Bold.on:
                ret += '\x02'
            return ret

        ret = ''
        if new_style.fg != current_style.fg:
            ret += FOREGROUND_CODES[new_style.fg]

        if new_style.bold != current_style.bold:
            # There's no on/off for bold, just a toggle
            ret += '\x02'

        return ret
Example #50
 def connection_made(self, transport):
     self.transport = transport
     self.closed = False
     self.queue = Queue()
Example #51
class WebSocketCommonProtocol(asyncio.StreamReaderProtocol):
    """
    This class implements common parts of the WebSocket protocol.

    It assumes that the WebSocket connection is established. The handshake is
    managed in subclasses such as
    :class:`~websockets.server.WebSocketServerProtocol` and
    :class:`~websockets.client.WebSocketClientProtocol`.

    It runs a task that stores incoming data frames in a queue and deals with
    control frames automatically. It sends outgoing data frames and performs
    the closing handshake.

    The `host`, `port` and `secure` parameters are simply stored as attributes
    for handlers that need them.

    The `timeout` parameter defines the maximum wait time in seconds for
    completing the closing handshake and, only on the client side, for
    terminating the TCP connection. :meth:`close()` will complete in at most
    this time on the server side and twice this time on the client side.

    The `max_size` parameter enforces the maximum size for incoming messages
    in bytes. The default value is 1MB. ``None`` disables the limit. If a
    message larger than the maximum size is received, :meth:`recv()` will
    return ``None`` and the connection will be closed with status code 1009.

    Once the connection is closed, the status code is available in the
    :attr:`close_code` attribute and the reason in :attr:`close_reason`.
    """

    # There are only two differences between the client-side and the server-
    # side behavior: masking the payload and closing the underlying TCP
    # connection. This class implements the server-side behavior by default.
    # To get the client-side behavior, set is_client = True.

    is_client = False
    state = 'OPEN'

    def __init__(self, *,
                 host=None, port=None, secure=None, timeout=10, max_size=2 ** 20, loop=None):
        self.host = host
        self.port = port
        self.secure = secure

        self.timeout = timeout
        self.max_size = max_size

        super().__init__(asyncio.StreamReader(), self.client_connected, loop)

        self.close_code = None
        self.close_reason = ''

        # Futures tracking steps in the connection's lifecycle.
        self.opening_handshake = asyncio.Future()
        self.closing_handshake = asyncio.Future()
        self.connection_failed = asyncio.Future()
        self.connection_closed = asyncio.Future()

        # Queue of received messages.
        self.messages = Queue()

        # Mapping of ping IDs to waiters, in chronological order.
        self.pings = collections.OrderedDict()

        # Task managing the connection.
        self.worker = asyncio.ensure_future(self.run())

        # In a subclass implementing the opening handshake, the state will be
        # CONNECTING at this point.
        if self.state == 'OPEN':
            self.opening_handshake.set_result(True)

    # Public API

    @property
    def open(self):
        """
        This property is ``True`` when the connection is usable.

        It may be used to handle disconnections gracefully.
        """
        return self.state == 'OPEN'

    @asyncio.coroutine
    def close(self, code=1000, reason=''):
        """
        This coroutine performs the closing handshake.

        This is the expected way to terminate a connection on the server side.

        It waits for the other end to complete the handshake. It doesn't do
        anything once the connection is closed.

        It's usually safe to wrap this coroutine in `asyncio.ensure_future()`
        since errors during connection termination aren't particularly useful.

        The `code` must be an :class:`int` and the `reason` a :class:`str`.
        """
        if self.state == 'OPEN':
            # 7.1.2. Start the WebSocket Closing Handshake
            self.close_code, self.close_reason = code, reason
            yield from self.write_frame(OP_CLOSE, serialize_close(code, reason))
            # 7.1.3. The WebSocket Closing Handshake is Started
            self.state = 'CLOSING'

        # If the connection doesn't terminate within the timeout, break out of
        # the worker loop.
        try:
            yield from asyncio.wait_for(self.worker, timeout=self.timeout)
        except asyncio.TimeoutError:
            self.worker.cancel()

        # The worker should terminate quickly once it has been cancelled.
        yield from self.worker

    @asyncio.coroutine
    def recv(self):
        """
        This coroutine receives the next message.

        It returns a :class:`str` for a text frame and :class:`bytes` for a
        binary frame.

        When the end of the message stream is reached, or when a protocol
        error occurs, :meth:`recv` returns ``None``, indicating that the
        connection is closed.
        """
        # Return any available message
        try:
            return self.messages.get_nowait()
        except QueueEmpty:
            pass

        # Wait for a message until the connection is closed
        next_message = asyncio.ensure_future(self.messages.get())
        done, pending = yield from asyncio.wait(
                [next_message, self.worker],
                return_when=asyncio.FIRST_COMPLETED)
        if next_message in done:
            return next_message.result()
        else:
            next_message.cancel()

    @asyncio.coroutine
    def send(self, data):
        """
        This coroutine sends a message.

        It sends a :class:`str` as a text frame and :class:`bytes` as a binary
        frame.

        It raises a :exc:`TypeError` for other inputs and
        :exc:`InvalidState` once the connection is closed.
        """
        if isinstance(data, str):
            opcode = 1
            data = data.encode('utf-8')
        elif isinstance(data, bytes):
            opcode = 2
        else:
            raise TypeError("data must be bytes or str")
        yield from self.write_frame(opcode, data)

    @asyncio.coroutine
    def ping(self, data=None):
        """
        This coroutine sends a ping.

        It returns a Future which will be completed when the corresponding
        pong is received and which you may ignore if you don't want to wait.

        A ping may serve as a keepalive.
        """
        # Protect against duplicates if a payload is explicitly set.
        if data in self.pings:
            raise ValueError("Already waiting for a pong with the same data")
        # Generate a unique random payload otherwise.
        while data is None or data in self.pings:
            data = struct.pack('!I', random.getrandbits(32))

        self.pings[data] = asyncio.Future()
        yield from self.write_frame(OP_PING, data)
        return self.pings[data]

    @asyncio.coroutine
    def pong(self, data=b''):
        """
        This coroutine sends a pong.

        An unsolicited pong may serve as a unidirectional heartbeat.
        """
        yield from self.write_frame(OP_PONG, data)

    # Private methods - no guarantees.

    @asyncio.coroutine
    def run(self):
        # This coroutine guarantees that the connection is closed at exit.
        yield from self.opening_handshake
        while not self.closing_handshake.done():
            try:
                msg = yield from self.read_message()
                if msg is None:
                    break
                self.messages.put_nowait(msg)
            except asyncio.CancelledError:
                break
            except WebSocketProtocolError:
                yield from self.fail_connection(1002)
            except asyncio.IncompleteReadError:
                yield from self.fail_connection(1006)
            except UnicodeDecodeError:
                yield from self.fail_connection(1007)
            except PayloadTooBig:
                yield from self.fail_connection(1009)
            except Exception:
                yield from self.fail_connection(1011)
                raise
        yield from self.close_connection()

    @asyncio.coroutine
    def read_message(self):
        # Reassemble fragmented messages.
        frame = yield from self.read_data_frame(max_size=self.max_size)
        if frame is None:
            return
        if frame.opcode == OP_TEXT:
            text = True
        elif frame.opcode == OP_BINARY:
            text = False
        else:   # frame.opcode == OP_CONT
            raise WebSocketProtocolError("Unexpected opcode")

        # Shortcut for the common case - no fragmentation
        if frame.fin:
            return frame.data.decode('utf-8') if text else frame.data

        # 5.4. Fragmentation
        chunks = []
        max_size = self.max_size
        if text:
            decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
            if max_size is None:
                def append(frame):
                    nonlocal chunks
                    chunks.append(decoder.decode(frame.data, frame.fin))
            else:
                def append(frame):
                    nonlocal chunks, max_size
                    chunks.append(decoder.decode(frame.data, frame.fin))
                    max_size -= len(frame.data)
        else:
            if max_size is None:
                def append(frame):
                    nonlocal chunks
                    chunks.append(frame.data)
            else:
                def append(frame):
                    nonlocal chunks, max_size
                    chunks.append(frame.data)
                    max_size -= len(frame.data)
        append(frame)

        while not frame.fin:
            frame = yield from self.read_data_frame(max_size=max_size)
            if frame is None:
                raise WebSocketProtocolError("Incomplete fragmented message")
            if frame.opcode != OP_CONT:
                raise WebSocketProtocolError("Unexpected opcode")
            append(frame)

        return ('' if text else b'').join(chunks)

    @asyncio.coroutine
    def read_data_frame(self, max_size):
        # Deal with control frames automatically and return next data frame.
        # 6.2. Receiving Data
        while True:
            frame = yield from self.read_frame(max_size)
            # 5.5. Control Frames
            if frame.opcode == OP_CLOSE:
                self.close_code, self.close_reason = parse_close(frame.data)
                if self.state != 'CLOSING':
                    # 7.1.3. The WebSocket Closing Handshake is Started
                    self.state = 'CLOSING'
                    yield from self.write_frame(OP_CLOSE, frame.data, 'CLOSING')
                if not self.closing_handshake.done():
                    self.closing_handshake.set_result(True)
                return
            elif frame.opcode == OP_PING:
                # Answer pings.
                yield from self.pong(frame.data)
            elif frame.opcode == OP_PONG:
                # Do not acknowledge pings on unsolicited pongs.
                if frame.data in self.pings:
                    # Acknowledge all pings up to the one matching this pong.
                    ping_id = None
                    while ping_id != frame.data:
                        ping_id, waiter = self.pings.popitem(0)
                        if not waiter.cancelled():
                            waiter.set_result(None)
            # 5.6. Data Frames
            else:
                return frame

    @asyncio.coroutine
    def read_frame(self, max_size):
        is_masked = not self.is_client
        frame = yield from read_frame(self.reader.readexactly, is_masked, max_size=max_size)
        side = 'client' if self.is_client else 'server'
        logger.debug("%s << %s", side, frame)
        return frame

    @asyncio.coroutine
    def write_frame(self, opcode, data=b'', expected_state='OPEN'):
        # This may happen if a user attempts to write on a closed connection.
        if self.state != expected_state:
            raise InvalidState("Cannot write to a WebSocket "
                               "in the {} state".format(self.state))
        frame = Frame(True, opcode, data)
        side = 'client' if self.is_client else 'server'
        logger.debug("%s >> %s", side, frame)
        is_masked = self.is_client
        write_frame(frame, self.writer.write, is_masked)
        try:
            # Handle flow control automatically.
            yield from self.writer.drain()
        except ConnectionResetError:
            # Terminate the connection if the socket died,
            # unless it's already being closed.
            if expected_state != 'CLOSING':
                self.state = 'CLOSING'
                yield from self.fail_connection(1006)

    @asyncio.coroutine
    def close_connection(self):
        # 7.1.1. Close the WebSocket Connection
        if self.state == 'CLOSED':
            return

        # Defensive assertion for protocol compliance.
        if self.state != 'CLOSING':                         # pragma: no cover
            raise InvalidState("Cannot close a WebSocket connection "
                               "in the {} state".format(self.state))

        if self.is_client:
            try:
                yield from asyncio.wait_for(self.connection_closed,
                        timeout=self.timeout)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                pass

            if self.state == 'CLOSED':
                return

        # Attempt to terminate the TCP connection properly.
        # If the socket is already closed, this will crash.
        try:
            if self.writer.can_write_eof():
                self.writer.write_eof()
        except Exception:
            pass

        self.writer.close()

        try:
            yield from asyncio.wait_for(self.connection_closed,
                    timeout=self.timeout)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            pass

    @asyncio.coroutine
    def fail_connection(self, code=1011, reason=''):
        # Avoid calling fail_connection more than once to minimize
        # the consequences of race conditions between the two sides.
        if self.connection_failed.done():
            # Wait until the other coroutine calls connection_lost.
            yield from self.connection_closed
            return
        else:
            self.connection_failed.set_result(None)

        # Losing the connection usually results in a protocol error.
        # Preserve the original error code in this case.
        if self.close_code != 1006:
            self.close_code, self.close_reason = code, reason
        # 7.1.7. Fail the WebSocket Connection
        logger.info("Failing the WebSocket connection: %d %s", code, reason)
        if self.state == 'OPEN':
            yield from self.write_frame(OP_CLOSE, serialize_close(code, reason))
            self.state = 'CLOSING'
        if not self.closing_handshake.done():
            self.closing_handshake.set_result(False)
        yield from self.close_connection()

    # asyncio StreamReaderProtocol methods

    def client_connected(self, reader, writer):
        self.reader = reader
        self.writer = writer

    def connection_lost(self, exc):
        # 7.1.4. The WebSocket Connection is Closed
        self.state = 'CLOSED'
        if not self.connection_closed.done():
            self.connection_closed.set_result(None)
        if self.close_code is None:
            self.close_code = 1006
        super().connection_lost(exc)
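
A minimal usage sketch for the protocol above, assuming the opening handshake has already completed and `ws` is a connected instance; the `echo` helper and its wiring are hypothetical, not part of the original example:

import asyncio

@asyncio.coroutine
def echo(ws):
    # Drain messages until recv() returns None (the connection was closed),
    # echo each one back, then perform the closing handshake.
    while True:
        msg = yield from ws.recv()
        if msg is None:
            break
        yield from ws.send(msg)
    yield from ws.close(1000, 'bye')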
Example #52
 def __init__(self, loop, network, send=False):
     self.loop = loop
     self.network = network
     self.read_queue = Queue(loop=loop)
     self.send = send #ugh what if i want to RECEIVE though.
Example #53
class DCCClient:
    def __init__(self, loop, network, send=False):
        self.loop = loop
        self.network = network
        self.read_queue = Queue(loop=loop)
        self.send = send #ugh what if i want to RECEIVE though.
        #not sure what the use case would be but...?

    @asyncio.coroutine
    def connect(self, port=None):
        if not self.send:
            server = self.current_server = self.network.servers[0]
            self._reader, self._writer = yield from server.connect(self.loop)
            self._read_loop_task = asyncio.Task(self._start_read_loop())
            asyncio.ensure_future(self._read_loop_task, loop=self.loop)
        else:
            self._waiting = asyncio.Lock()
            yield from self._waiting.acquire()
            if port:
                self.network = yield from asyncio.start_server(self._handle_client,
                    host=socket.gethostbyname(socket.gethostname()), port=port, loop=self.loop)
            else:
                logger.error("No port provided for send")

    @asyncio.coroutine
    def _handle_client(self, client_reader, client_writer):
        self._reader = client_reader
        self._writer = client_writer
        self._waiting.release()
        self._read_loop_task = asyncio.Task(self._start_read_loop())
        asyncio.ensure_future(self._read_loop_task, loop=self.loop)

    @asyncio.coroutine
    def disconnect(self):
        yield from self._writer.drain()
        self._writer.write_eof()

        self._read_loop_task.cancel()
        yield from self._read_loop_task

        while not self._reader.at_eof():
            yield from self._reader.readline()

        if self.send:
            self.network.close()

    @asyncio.coroutine
    def _start_read_loop(self):
        if not self.send: #acks don't really do anything so don't listen for them
            while not self._reader.at_eof():
                try:
                    yield from self._read_message()
                except CancelledError:
                    return
                except Exception:
                    logger.exception("Smothering exception in DCC read loop")

    @asyncio.coroutine
    def _read_message(self):
        line = yield from self._reader.readline()
        m = re.match(b'(.*)(\r|\n|\r\n)$', line)
        assert m
        line = m.group(1)
        message = DCCMessage.parse(line)
        logger.debug("recv: %r", message)
        event = DirectMessage(self, message)
        self.read_queue.put_nowait((message, event))

    @asyncio.coroutine
    def read_event(self):
        message, event = yield from self.read_queue.get()
        return event

    @asyncio.coroutine
    def say(self, message, target=None, no_respond=None):
        self.send_message(message)

    @asyncio.coroutine
    def send_message(self, message):
        message = DCCMessage(message)
        logger.debug("sent: %r", message)
        self._writer.write(message.render().encode('utf8') + b'\r\n')

    @asyncio.coroutine
    def transfer(self, path):
        yield from self._waiting.acquire()
        f = open(str(path), 'rb')
        block = b'\x01'
        while block != b'':
            block = f.read(1024)
            self._writer.write(block)
        f.close()
        self._waiting.release()
        return True
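
A hedged sketch of driving the sender side of DCCClient above: listen on a port, wait for the peer to connect, push a file, then tear down. The `loop`, `network`, `path`, and `port` values are assumptions of this sketch:

import asyncio

@asyncio.coroutine
def send_file(loop, network, path, port):
    # In send mode, connect() starts a listening server and transfer()
    # blocks on the internal lock until the peer actually connects.
    dcc = DCCClient(loop, network, send=True)
    yield from dcc.connect(port=port)
    yield from dcc.transfer(path)
    yield from dcc.disconnect()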
Example #54
class StreamConnection:
    def __init__(self, sr, sw, *, loop=None):
        if not loop:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._sr = sr
        self._sw = sw
        self._msgs = Queue(loop=loop)
        self._worker = loop.create_task(self._run())

    @asyncio.coroutine
    def _run(self):
        while self.alive():
            try:
                data = yield from self._sr.readline()
                if data and len(data):
                    self._msgs.put_nowait(self._convert(data))
            except asyncio.CancelledError:
                logger.debug("readline from stream reader was cancelled.")
            except ConnectionError:
                logger.debug("connection error")
                break
        logger.debug("connection closed")

    def _convert(self, data):
        return data.strip()

    @asyncio.coroutine
    def recv(self):
        try:
            return self._msgs.get_nowait()
        except QueueEmpty:
            pass

        # Wait for a message until the connection is closed
        next_message = self._loop.create_task(self._msgs.get())
        done, pending = yield from asyncio.wait(
                [next_message, self._worker],
                loop=self._loop, return_when=asyncio.FIRST_COMPLETED)
        if next_message in done:
            return next_message.result()
        else:
            next_message.cancel()

    def send(self, data):
        if not self.alive():
            raise ConnectionError("connection was closed.")
        try:
            data = data + b'\n'
            self._sw.write(data)
        except OSError:
            raise ConnectionError("can't send data.")
        except Exception:
            logger.debug("Q___Q")

    def alive(self):
        return not self._sr.at_eof()

    @asyncio.coroutine
    def drain(self):
        yield from self._sw.drain()
    
    @asyncio.coroutine
    def close(self):
        if self.alive():
            try:
                yield from self._sw.drain()
                self._sw.write_eof()
            except ConnectionError:
                pass
            else:
                self._sr.feed_eof()
                self._sw.close()
        self._worker.cancel()
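
A minimal sketch of using StreamConnection over a plain TCP connection; the host and port here are placeholders:

import asyncio

@asyncio.coroutine
def request(loop):
    # open_connection() supplies the (StreamReader, StreamWriter) pair the
    # constructor expects; recv() then returns newline-delimited replies.
    sr, sw = yield from asyncio.open_connection('127.0.0.1', 8888, loop=loop)
    conn = StreamConnection(sr, sw, loop=loop)
    conn.send(b'ping')
    reply = yield from conn.recv()
    yield from conn.close()
    return reply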
Example #55
class IRCClient:
    """Higher-level IRC client.  Takes care of most of the hard parts of IRC:
    incoming server messages are bundled into more intelligible events (see
    ``dywypi.event``), and commands that expect replies are implemented as
    coroutines.
    """

    def __init__(self, loop, network):
        self.loop = loop
        self.network = network
        # TODO should this be a param?  a property of the network?  or, more
        # likely, channel-specific and decoded separately and...
        self.charset = "utf8"

        self.joined_channels = {}  # name => Channel

        # IRC server features, as reported by ISUPPORT, with defaults taken
        # from the RFC.
        self.len_nick = 9
        self.len_channel = 200
        self.len_message = 510
        # These lengths don't have limits mentioned in the RFC, so going with
        # the smallest known values in the wild
        self.len_kick = 80
        self.len_topic = 80
        self.len_away = 160
        self.max_watches = 0
        self.max_targets = 1
        self.channel_types = set("#&")
        self.channel_modes = {}  # TODO, haha.
        self.channel_prefixes = {}  # TODO here too.  IRCMode is awkward.
        self.network_title = self.network.name
        self.features = {}

        # Various intermediate state used for waiting for replies and
        # aggregating multi-part replies
        # TODO hmmm so what happens if state just gets left here forever?  do
        # we care?
        self._pending_names = {}
        self._names_futures = {}
        self._pending_topics = {}
        self._join_futures = {}

        self._message_waiters = deque()

        self.read_queue = Queue(loop=loop)

    def get_channel(self, channel_name):
        """Returns a `Channel` object containing everything the client
        definitively knows about the given channel.

        Note that if you, say, ask for the topic of a channel you aren't in and
        then immediately call `get_channel`, the returned object won't have its
        topic populated.  State is only tracked persistently for channels the
        bot is in; otherwise there's no way to know whether or not it's stale.
        """
        if channel_name in self.joined_channels:
            return self.joined_channels[channel_name]
        else:
            return IRCChannel(self, channel_name)

    @asyncio.coroutine
    def connect(self):
        """Coroutine for connecting to a single server.

        Note that this will nonblock until the client is "registered", defined
        as the first PING/PONG exchange.
        """
        # TODO this is a poor excuse for round-robin  :)
        server = self.current_server = self.network.servers[0]

        # TODO i'm pretty sure the server tells us what our nick is, and we
        # should believe that instead
        self.nick = self.network.preferred_nick

        # TODO: handle disconnection, somehow.  probably affects a lot of
        # things.
        self._reader, self._writer = yield from server.connect(self.loop)

        if server.password:
            self.send_message("PASS", server.password)
        self.send_message("NICK", self.nick)
        self.send_message("USER", "dywypi", "-", "-", "dywypi Python IRC bot")

        # Start the reader loop, or we can't respond to anything
        self._read_loop_task = asyncio.Task(self._start_read_loop())
        asyncio.ensure_future(self._read_loop_task, loop=self.loop)

    @asyncio.coroutine
    def disconnect(self):
        # Quit
        self.send_message("QUIT", "Seeya!")

        # Flush the write buffer
        yield from self._writer.drain()
        self._writer.close()

        # Stop reading events
        self._read_loop_task.cancel()
        # This looks a little funny since this task is already running, but we
        # want to block until it's actually done, which might require dipping
        # back into the event loop
        yield from self._read_loop_task

        # Read until the connection closes
        while not self._reader.at_eof():
            yield from self._reader.readline()

    @asyncio.coroutine
    def _start_read_loop(self):
        """Internal coroutine that just keeps reading from the server in a
        loop.  Called once after a connect and should never be called again
        after that.
        """
        # TODO this is currently just to keep the message queue going, but
        # eventually it should turn them into events and stuff them in an event
        # queue
        while not self._reader.at_eof():
            try:
                yield from self._read_message()
            except CancelledError:
                return
            except Exception:
                log.exception("Smothering exception in IRC read loop")

    @asyncio.coroutine
    def gather_messages(self, *start, finish):
        fut = asyncio.Future()
        messages = {}
        for command in start:
            messages[command] = False
        for command in finish:
            messages[command] = True
        collected = []
        self._message_waiters.append((fut, messages, collected))
        yield from fut
        return collected

    def _possibly_gather_message(self, message):
        if not self._message_waiters:
            return

        # TODO there is a general ongoing problem here with matching up
        # responses.  ESPECIALLY when error codes are possible.  something here
        # is gonna have to get a bit fancier.

        fut, waiting_on, collected = self._message_waiters[0]
        # TODO is it possible for even a PING to appear in the middle of
        # some other response?
        # TODO this is still susceptible to weirdness when there's, say, a
        # queued error response to a PRIVMSG on its way back; it'll look
        # like the call we just made failed, and all the real responses
        # will be dropped.  can we assume some set of error replies ONLY
        # happen in response to sending a message of some kind, maybe?
        # TODO for that matter, where does the error response to a PRIVMSG
        # even go?  the whole problem is that we can't know for sure when
        # it succeeded, unless we put a timeout on every call to say()
        finish = False
        if message.command in waiting_on:
            finish = waiting_on[message.command]
        elif message.is_error:
            # Always consider an error as finishing
            # TODO but we might have gotten this error in response to something
            # else we did before this message...  :S
            if message.command in {"ERR_CANNOTSENDTOCHAN"}:
                # Looks like a PRIVMSG error or similar, so probably not a
                # response to this particular message.
                return
            finish = True
        elif not collected:
            # Got a regular response we weren't expecting, AND this future
            # hasn't started collecting yet -- the response probably just
            # hasn't started coming back yet, so don't do anything yet.
            return

        # If we get here, we expected this response, and should keep
        # feeding into this future.
        collected.append(message)

        if finish:
            # Done, one way or another
            self._message_waiters.popleft()
            if message.is_error:
                fut.set_exception(IRCError(message))
            else:
                fut.set_result(collected)

    @asyncio.coroutine
    def _read_message(self):
        """Internal dispatcher for messages received from the server."""
        line = yield from self._reader.readline()
        assert line.endswith(b"\r\n")
        line = line[:-2]

        # TODO valerr, unicodeerr
        message = IRCMessage.parse(line.decode(self.charset))
        log.debug("recv: %r", message)

        # TODO unclear whether this should go before or after _handle_foo
        self._possibly_gather_message(message)

        # Boy do I ever hate this pattern but it's slightly more maintainable
        # than a 500-line if tree.
        handler = getattr(self, "_handle_" + message.command, None)
        event = None
        if handler:
            event = handler(message)
        self.read_queue.put_nowait((message, event))

    def _handle_PING(self, message):
        # PONG
        self.send_message("PONG", message.args[-1])

    def _handle_RPL_WELCOME(self, message):
        # Initial registration: do autojoins, and any other onconnect work
        for channel_name in self.network.autojoins:
            asyncio.ensure_future(self.join(channel_name), loop=self.loop)

    def _handle_RPL_ISUPPORT(self, message):
        me, *features, human_text = message.args
        for feature_string in features:
            feature, _, value = feature_string.partition("=")
            if not value:
                # str.partition() returns '' (not None) when there is no '=',
                # so treat a bare feature name as a boolean flag.
                value = True

            self.features[feature] = value

            if feature == "NICKLEN":
                self.len_nick = int(value)
            elif feature == "CHANNELLEN":
                self.len_channel = int(value)
            elif feature == "KICKLEN":
                self.len_kick = int(value)
            elif feature == "TOPICLEN":
                self.len_topic = int(value)
            elif feature == "AWAYLEN":
                self.len_away = int(value)
            elif feature == "WATCH":
                self.max_watches = int(value)
            elif feature == "CHANTYPES":
                self.channel_types = set(value)
            elif feature == "PREFIX":
                # List of channel user modes, in relative priority order, in
                # the format (ov)@+
                assert value[0] == "("
                letters, symbols = value[1:].split(")")
                assert len(letters) == len(symbols)
                self.channel_prefixes.clear()
                for letter, symbol in zip(letters, symbols):
                    mode = IRCMode(letter, prefix=symbol)
                    self.channel_modes[letter] = mode
                    self.channel_prefixes[symbol] = mode
            elif feature == "MAXTARGETS":
                self.max_targets = int(value)
            elif feature == "CHANMODES":
                # Four groups delimited by lists: list-style (+b), arg required
                # (+k), arg required only to set (+l), argless
                lists, args, argsets, argless = value.split(",")
                for letter in lists:
                    self.channel_modes[letter] = IRCMode(letter, multi=True)
                for letter in args:
                    self.channel_modes[letter] = IRCMode(letter, arg_on_set=True, arg_on_remove=True)
                for letter in argsets:
                    self.channel_modes[letter] = IRCMode(letter, arg_on_set=True)
                for letter in argless:
                    self.channel_modes[letter] = IRCMode(letter)
            elif feature == "NETWORK":
                self.network_title = value

    def _handle_JOIN(self, message):
        channel_name, = message.args
        joiner = Peer.from_prefix(message.prefix)
        # TODO should there be a self.me?  how...
        if joiner.name == self.nick:
            # We just joined a channel
            # assert channel_name not in self.joined_channels
            # TODO key?  do we care?
            # TODO what about channel configuration and anon non-joined
            # channels?  how do these all relate...
            channel = IRCChannel(self, channel_name)
            self.joined_channels[channel.name] = channel
        else:
            # Someone else just joined the channel
            self.joined_channels[channel_name].add_user(joiner)

    def _handle_RPL_TOPIC(self, message):
        # Topic.  Sent when joining or when requesting the topic.
        # TODO this doesn't handle the "requesting" part
        # TODO what if me != me?
        me, channel_name, topic_text = message.args
        self._pending_topics[channel_name] = IRCTopic(topic_text)

    def _handle_RPL_TOPICWHOTIME(self, message):
        # Topic author (NONSTANDARD).  Sent after RPL_TOPIC.
        # Unfortunately, there's no way to know whether to expect this.
        # TODO this doesn't handle the "requesting" part
        # TODO what if me != me?
        me, channel_name, author, timestamp = message.args
        topic = self._pending_topics.setdefault(channel_name, IRCTopic(""))
        topic.author = Peer.from_prefix(author)
        topic.timestamp = datetime.utcfromtimestamp(int(timestamp))

    def _handle_RPL_NAMREPLY(self, message):
        # Names response.  Sent when joining or when requesting a names
        # list.  Must be ended with a RPL_ENDOFNAMES.
        me, useless_equals_sign, channel_name, *raw_names = message.args
        # List of names is actually optional (?!)
        if raw_names:
            raw_names = raw_names[0]
        else:
            raw_names = ""

        names = raw_names.strip(" ").split(" ")
        namelist = self._pending_names.setdefault(channel_name, [])
        # TODO modes?  should those be stripped off here?
        # TODO for that matter should these become peers here?
        namelist.extend(names)

    def _handle_RPL_ENDOFNAMES(self, message):
        # End of names list.  Sent at the very end of a join or the very
        # end of a NAMES request.
        me, channel_name, info = message.args
        namelist = self._pending_names.pop(channel_name, [])

        if channel_name in self._names_futures:
            # TODO we should probably not ever have a names future AND a
            # pending join at the same time.  or, does it matter?
            self._names_futures[channel_name].set_result(namelist)
            del self._names_futures[channel_name]

        if channel_name in self.joined_channels:
            # Join synchronized!
            channel = self.joined_channels[channel_name]
            channel.sync = True

            channel.topic = self._pending_topics.pop(channel_name, None)

            for name in namelist:
                modes = set()
                # TODO use features!
                while name and name[0] in "+%@&~":
                    modes.add(name[0])
                    name = name[1:]

                # TODO haha no this is so bad.
                # TODO the bot should, obviously, keep a record of all
                # known users as well.  alas, mutable everything.
                peer = Peer(name, None, None)

                channel.add_user(peer, modes)

            if channel_name in self._join_futures:
                # Update the Future
                self._join_futures[channel_name].set_result(channel)
                del self._join_futures[channel_name]

    def _handle_PRIVMSG(self, message):
        # PRIVMSG target :text
        target_name, text = message.args

        source = Peer.from_prefix(message.prefix)

        if target_name[0] in self.channel_types:
            target = self.get_channel(target_name)
            cls = PublicMessage
        else:
            # TODO this is /us/, so, surely ought to be known
            target = Peer(target_name, None, None)
            cls = PrivateMessage

        return cls(source, target, text, client=self, raw=message)

    @asyncio.coroutine
    def read_event(self):
        """Produce a single IRC event.

        This client does not do any kind of multiplexing or event handler
        notification; that's left to a higher level.
        """
        message, event = yield from self.read_queue.get()
        return event

    # Implementations of particular commands

    # TODO should these be part of the general client interface, or should
    # there be a separate thing that smooths out the details?
    @asyncio.coroutine
    def whois(self, target):
        """Coroutine that queries for information about a target."""
        self.send_message("WHOIS", target)
        messages = yield from self.gather_messages(
            "RPL_WHOISUSER",
            "RPL_WHOISSERVER",
            "RPL_WHOISOPERATOR",
            "RPL_WHOISIDLE",
            "RPL_WHOISCHANNELS",
            "RPL_WHOISVIRT",
            "RPL_WHOIS_HIDDEN",
            "RPL_WHOISSPECIAL",
            "RPL_WHOISSECURE",
            "RPL_WHOISSTAFF",
            "RPL_WHOISLANGUAGE",
            finish=["RPL_ENDOFWHOIS", "ERR_NOSUCHSERVER", "ERR_NONICKNAMEGIVEN", "ERR_NOSUCHNICK"],
        )

        # nb: The first two args for all the responses are our nick and the
        # target's nick.
        # TODO apparently you can whois multiple nicks at a time
        for message in messages:
            if message.command == "RPL_WHOISUSER":
                ident = message.args[2]
                hostname = message.args[3]
                # args[4] is a literal *
                realname = message.args[5]
            elif message.command == "RPL_WHOISIDLE":
                # Idle time.  Some servers (at least, inspircd) also have
                # signon time as unixtime.
                idle = timedelta(seconds=int(message.args[2]))
            elif message.command == "RPL_WHOISCHANNELS":
                # TODO split and parse out the usermodes
                # TODO don't some servers have an extension with multiple modes
                # here
                channels = message.args[2]
            elif message.command == "RPL_WHOISSERVER":
                server = message.args[2]
                server_desc = message.args[3]

        return messages

    @asyncio.coroutine
    def say(self, target, message):
        """Coroutine that sends a message to a target, which may be either a
        `Channel` or a `Peer`.
        """
        self.send_message("PRIVMSG", target, message)

    @asyncio.coroutine
    def join(self, channel_name, key=None):
        """Coroutine that joins a channel, and nonblocks until the join is
        "synchronized" (defined as receiving the nick list).
        """
        if channel_name in self._join_futures:
            return self._join_futures[channel_name]

        # TODO multiple?  error on commas?
        if key is None:
            self.send_message("JOIN", channel_name)
        else:
            self.send_message("JOIN", channel_name, key)

        # Clear out any lingering names list
        self._pending_names[channel_name] = []

        # Return a Future, to be populated by the message loop
        fut = self._join_futures[channel_name] = asyncio.Future()
        return fut

    @asyncio.coroutine
    def names(self, channel_name):
        """Coroutine that returns a list of names in a channel."""
        # TODO there's some ISUPPORT extension that lists /all/ channel modes
        # on each name that comes back...  support that?
        self.send_message("NAMES", channel_name)

        # No need to do the same thing twice
        if channel_name in self._names_futures:
            return self._names_futures[channel_name]

        # Clear out any lingering names list
        self._pending_names[channel_name] = []

        # Return a Future, to be populated by the message loop
        fut = self._names_futures[channel_name] = asyncio.Future()
        return fut

    def set_topic(self, channel, topic):
        """Sets the channel topic."""
        self.send_message("TOPIC", channel, topic)

    # TODO unclear whether this stuff should be separate or what; it's less
    # about the protocol and more about the dywypi interface
    def send_message(self, command, *args):
        message = IRCMessage(command, *args)
        log.debug("sent: %r", message)
        self._writer.write(message.render().encode(self.charset) + b"\r\n")

    def format_transition(self, current_style, new_style):
        if new_style == Style.default():
            # Reset code, ^O
            return "\x0f"

        if new_style.fg != current_style.fg and new_style.fg is Color.default:
            # IRC has no "reset to default" code.  mIRC claims color 99 is for
            # this, but it lies, at least in irssi.  So we must reset and
            # reapply everything.
            ret = "\x0f"
            if new_style.bold is Bold.on:
                ret += "\x02"
            return ret

        ret = ""
        if new_style.fg != current_style.fg:
            ret += FOREGROUND_CODES[new_style.fg]

        if new_style.bold != current_style.bold:
            # There's no on/off for bold, just a toggle
            ret += "\x02"

        return ret
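
A hedged sketch of a top-level driver for this client: connect once, then keep consuming events from the read queue. The `loop` and `network` objects, and what to do with each event, are assumptions of the sketch:

import asyncio

@asyncio.coroutine
def run_forever(loop, network):
    # connect() registers with the server and starts the read loop;
    # read_event() then pops (message, event) pairs off read_queue and
    # returns just the event.
    client = IRCClient(loop, network)
    yield from client.connect()
    while True:
        event = yield from client.read_event()
        if event is not None:
            print(event)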
Example #56
 def __init__(self, bot):
     self.bot = bot
     self.timeout = int(self.bot.config.get('timeout'))
     self.ping_queue = Queue(loop=bot.loop)
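
A hedged sketch of how such a ping queue might be consumed: wait for the next ping with the configured timeout and treat silence as a dead link. The watchdog method and the reconnect() hook are hypothetical, not part of the original snippet:

import asyncio

@asyncio.coroutine
def watchdog(self):
    while True:
        try:
            # Incoming PINGs are assumed to be put on ping_queue elsewhere.
            yield from asyncio.wait_for(self.ping_queue.get(),
                                        timeout=self.timeout)
        except asyncio.TimeoutError:
            self.bot.reconnect()  # hypothetical recovery hook
            return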
Example #57
class IRCClient:
    """Higher-level IRC client.  Takes care of most of the hard parts of IRC:
    incoming server messages are bundled into more intelligible events (see
    ``dywypi.event``), and commands that expect replies are implemented as
    coroutines.
    """

    def __init__(self, loop, network):
        self.loop = loop
        self.network = network
        # TODO should this be a param?  a property of the network?  or, more
        # likely, channel-specific and decoded separately and...
        self.charset = 'utf8'

        self.joined_channels = {}  # name => Channel

        # IRC server features, as reported by ISUPPORT, with defaults taken
        # from the RFC.
        self.len_nick = 9
        self.len_channel = 200
        self.len_message = 510
        # These lengths don't have limits mentioned in the RFC, so going with
        # the smallest known values in the wild
        self.len_kick = 80
        self.len_topic = 80
        self.len_away = 160
        self.max_watches = 0
        self.max_targets = 1
        self.channel_types = set('#&')
        self.channel_modes = {}  # TODO, haha.
        self.channel_prefixes = {}  # TODO here too.  IRCMode is awkward.
        self.network_title = self.network.name
        self.features = {}

        # Various intermediate state used for waiting for replies and
        # aggregating multi-part replies
        # TODO hmmm so what happens if state just gets left here forever?  do
        # we care?
        self._pending_names = {}
        self._names_futures = {}
        self._pending_topics = {}
        self._join_futures = {}

        self._message_waiters = OrderedDict()

        self.read_queue = Queue(loop=loop)

    def get_channel(self, channel_name):
        """Returns a `Channel` object containing everything the client
        definitively knows about the given channel.

        Note that if you, say, ask for the topic of a channel you aren't in and
        then immediately call `get_channel`, the returned object won't have its
        topic populated.  State is only tracked persistently for channels the
        bot is in; otherwise there's no way to know whether or not it's stale.
        """
        if channel_name in self.joined_channels:
            return self.joined_channels[channel_name]
        else:
            return IRCChannel(self, channel_name)

    @asyncio.coroutine
    def connect(self):
        """Coroutine for connecting to a single server.

        Note that this will nonblock until the client is "registered", defined
        as the first PING/PONG exchange.
        """
        # TODO this is a poor excuse for round-robin  :)
        server = self.current_server = self.network.servers[0]

        # TODO i'm pretty sure the server tells us what our nick is, and we
        # should believe that instead
        self.nick = self.network.preferred_nick

        # TODO: handle disconnection, somehow.  probably affects a lot of
        # things.
        self._reader, self._writer = yield from server.connect(self.loop)
        log.debug('connected!')

        if server.password:
            self.send_message('PASS', server.password)
        self.send_message('NICK', self.nick)
        self.send_message('USER', 'dywypi', '-', '-', 'dywypi Python IRC bot')

        # Start the reader loop, or we can't respond to anything
        self._read_loop_task = asyncio.Task(self._start_read_loop())
        asyncio.ensure_future(self._read_loop_task, loop=self.loop)

    @asyncio.coroutine
    def disconnect(self):
        # Quit
        self.send_message('QUIT', 'Seeya!')

        # Flush the write buffer
        yield from self._writer.drain()
        self._writer.close()

        # Stop reading events
        self._read_loop_task.cancel()
        # This looks a little funny since this task is already running, but we
        # want to block until it's actually done, which might require dipping
        # back into the event loop
        yield from self._read_loop_task

        # Read until the connection closes
        while not self._reader.at_eof():
            yield from self._reader.readline()

    @asyncio.coroutine
    def _start_read_loop(self):
        """Internal coroutine that just keeps reading from the server in a
        loop.  Called once after a connect and should never be called again
        after that.
        """
        # TODO this is currently just to keep the message queue going, but
        # eventually it should turn them into events and stuff them in an event
        # queue
        while not self._reader.at_eof():
            try:
                yield from self._read_message()
            except CancelledError:
                return
            except Exception:
                log.exception("Smothering exception in IRC read loop")

    @asyncio.coroutine
    def gather_messages(self, *middle, end, errors=()):
        fut = asyncio.Future()
        messages = {}
        for command in middle:
            messages[command] = 'middle'
        for command in end:
            messages[command] = 'end'
        for command in errors:
            messages[command] = 'error'
        collected = []
        self._message_waiters[fut] = (messages, collected)
        yield from fut
        return collected

    @asyncio.coroutine
    def _read_message(self):
        """Internal dispatcher for messages received from the server."""
        line = yield from self._reader.readline()
        assert line.endswith(b'\r\n')
        line = line[:-2]

        # TODO valerr, unicodeerr
        message = IRCMessage.parse(line.decode(self.charset))
        log.debug("recv: %r", message)

        # TODO there is a general ongoing problem here with matching up
        # responses.  ESPECIALLY when error codes are possible.  something here
        # is gonna have to get a bit fancier.

        for fut, (waiting_on, collected) in self._message_waiters.items():
            # TODO this needs to handle error codes too, or the future will
            # linger forever!  potential problem: if the server is lagging
            # behind us, an error code might actually map to a privmsg we tried
            # to send (which has no success response) and we'll get all f****d
            # up.  i don't know if there's any way to solve this.
            # TODO hey stupid question: after we've seen ANY of the waited-on
            # messages, should we pipe all subsequent messages into that future
            # until we see the one that's supposed to end it?  something like
            # a forced JOIN could screw up a join attempt, for example, but if
            # we're getting RPL_TOPIC when we didn't actually ask for the
            # topic, THEN we know we're definitely in the join sequence.
            # TODO also given normal irc response flow, i'm pretty sure we
            # should only ever need to check the first pending future.  there's
            # no way we should need to skip around.
            # TODO maybe give these a timeout so a bad one doesn't f**k us up
            # forever
            if message.command in waiting_on:
                collected.append(message)
                if waiting_on[message.command] == 'end':
                    fut.set_result(collected)
                    del self._message_waiters[fut]
                elif waiting_on[message.command] == 'error':
                    fut.set_exception(IRCError(message))
                    del self._message_waiters[fut]
                break

        # Boy do I ever hate this pattern but it's slightly more maintainable
        # than a 500-line if tree.
        handler = getattr(self, '_handle_' + message.command, None)
        event = None
        if handler:
            event = handler(message)
        self.read_queue.put_nowait((message, event))

    def _handle_PING(self, message):
        # PONG
        self.send_message('PONG', message.args[-1])

    def _handle_RPL_WELCOME(self, message):
        # Initial registration: do autojoins, and any other onconnect work
        self.network.hostname = message.args[1].rsplit(sep='@')[-1]
        for channel_name in self.network.autojoins:
            asyncio.ensure_future(self.join(channel_name), loop=self.loop)

    def _handle_RPL_ISUPPORT(self, message):
        me, *features, human_text = message.args
        for feature_string in features:
            feature, _, value = feature_string.partition('=')
            if not value:
                # str.partition() returns '' (not None) when there is no '=',
                # so treat a bare feature name as a boolean flag.
                value = True

            self.features[feature] = value

            if feature == 'NICKLEN':
                self.len_nick = int(value)
            elif feature == 'CHANNELLEN':
                self.len_channel = int(value)
            elif feature == 'KICKLEN':
                self.len_kick = int(value)
            elif feature == 'TOPICLEN':
                self.len_topic = int(value)
            elif feature == 'AWAYLEN':
                self.len_away = int(value)
            elif feature == 'WATCH':
                self.max_watches = int(value)
            elif feature == 'CHANTYPES':
                self.channel_types = set(value)
            elif feature == 'PREFIX':
                # List of channel user modes, in relative priority order, in
                # the format (ov)@+
                assert value[0] == '('
                letters, symbols = value[1:].split(')')
                assert len(letters) == len(symbols)
                self.channel_prefixes.clear()
                for letter, symbol in zip(letters, symbols):
                    mode = IRCMode(letter, prefix=symbol)
                    self.channel_modes[letter] = mode
                    self.channel_prefixes[symbol] = mode
            elif feature == 'MAXTARGETS':
                self.max_targets = int(value)
            elif feature == 'CHANMODES':
                # Four groups delimited by lists: list-style (+b), arg required
                # (+k), arg required only to set (+l), argless
                lists, args, argsets, argless = value.split(',')
                for letter in lists:
                    self.channel_modes[letter] = IRCMode(
                        letter, multi=True)
                for letter in args:
                    self.channel_modes[letter] = IRCMode(
                        letter, arg_on_set=True, arg_on_remove=True)
                for letter in argsets:
                    self.channel_modes[letter] = IRCMode(
                        letter, arg_on_set=True)
                for letter in argless:
                    self.channel_modes[letter] = IRCMode(letter)
            elif feature == 'NETWORK':
                self.network_title = value

    def _handle_JOIN(self, message):
        channel_name, = message.args
        joiner = Peer.from_prefix(message.prefix)
        # TODO should there be a self.me?  how...
        if joiner.name == self.nick:
            # We just joined a channel
            #assert channel_name not in self.joined_channels
            # TODO key?  do we care?
            # TODO what about channel configuration and anon non-joined
            # channels?  how do these all relate...
            channel = IRCChannel(self, channel_name)
            self.joined_channels[channel.name] = channel
        else:
            # Someone else just joined the channel
            self.joined_channels[channel_name].add_user(joiner)

    def _handle_RPL_TOPIC(self, message):
        # Topic.  Sent when joining or when requesting the topic.
        # TODO this doesn't handle the "requesting" part
        # TODO what if me != me?
        me, channel_name, topic_text = message.args
        self._pending_topics[channel_name] = IRCTopic(topic_text)

    def _handle_RPL_TOPICWHOTIME(self, message):
        # Topic author (NONSTANDARD).  Sent after RPL_TOPIC.
        # Unfortunately, there's no way to know whether to expect this.
        # TODO this doesn't handle the "requesting" part
        # TODO what if me != me?
        me, channel_name, author, timestamp = message.args
        topic = self._pending_topics.setdefault(channel_name, IRCTopic(''))
        topic.author = Peer.from_prefix(author)
        topic.timestamp = datetime.utcfromtimestamp(int(timestamp))

    def _handle_RPL_NAMREPLY(self, message):
        # Names response.  Sent when joining or when requesting a names
        # list.  Must be ended with a RPL_ENDOFNAMES.
        me, useless_equals_sign, channel_name, *raw_names = message.args
        # List of names is actually optional (?!)
        if raw_names:
            raw_names = raw_names[0]
        else:
            raw_names = ''

        names = raw_names.strip(' ').split(' ')
        namelist = self._pending_names.setdefault(channel_name, [])
        # TODO modes?  should those be stripped off here?
        # TODO for that matter should these become peers here?
        namelist.extend(names)

    def _handle_RPL_ENDOFNAMES(self, message):
        # End of names list.  Sent at the very end of a join or the very
        # end of a NAMES request.
        me, channel_name, info = message.args
        namelist = self._pending_names.pop(channel_name, [])

        if channel_name in self._names_futures:
            # TODO we should probably not ever have a names future AND a
            # pending join at the same time.  or, does it matter?
            self._names_futures[channel_name].set_result(namelist)
            del self._names_futures[channel_name]

        if channel_name in self.joined_channels:
            # Join synchronized!
            channel = self.joined_channels[channel_name]
            channel.sync = True

            channel.topic = self._pending_topics.pop(channel_name, None)

            for name in namelist:
                modes = set()
                # TODO use features!
                while name and name[0] in '+%@&~':
                    modes.add(name[0])
                    name = name[1:]

                # TODO haha no this is so bad.
                # TODO the bot should, obviously, keep a record of all
                # known users as well.  alas, mutable everything.
                peer = Peer(name, None, None)

                channel.add_user(peer, modes)

            if channel_name in self._join_futures:
                # Update the Future
                self._join_futures[channel_name].set_result(channel)
                del self._join_futures[channel_name]

    def _handle_PRIVMSG(self, message):
        # PRIVMSG target :text
        target_name, text = message.args

        source = Peer.from_prefix(message.prefix)

        if target_name[0] in self.channel_types:
            target = self.get_channel(target_name)
            cls = PublicMessage
        else:
            # TODO this is /us/, so, surely ought to be known
            target = Peer(target_name, None, None)
            cls = PrivateMessage

        return cls(source, target, text, client=self, raw=message)

    @asyncio.coroutine
    def read_event(self):
        """Produce a single IRC event.

        This client does not do any kind of multiplexing or event handler
        notification; that's left to a higher level.
        """
        message, event = yield from self.read_queue.get()
        return event


    # Implementations of particular commands

    # TODO should these be part of the general client interface, or should
    # there be a separate thing that smooths out the details?
    @asyncio.coroutine
    def whois(self, target):
        """Coroutine that queries for information about a target."""
        self.send_message('WHOIS', target)
        messages = yield from self.gather_messages(
            'RPL_WHOISUSER',
            'RPL_WHOISSERVER',
            'RPL_WHOISOPERATOR',
            'RPL_WHOISIDLE',
            'RPL_WHOISCHANNELS',
            'RPL_WHOISVIRT',
            'RPL_WHOIS_HIDDEN',
            'RPL_WHOISSPECIAL',
            'RPL_WHOISSECURE',
            'RPL_WHOISSTAFF',
            'RPL_WHOISLANGUAGE',
            end=[
                'RPL_ENDOFWHOIS',
            ],
            errors=[
                'ERR_NOSUCHSERVER',
                'ERR_NONICKNAMEGIVEN',
                'ERR_NOSUCHNICK',
            ],
        )

        # nb: The first two args for all the responses are our nick and the
        # target's nick.
        # TODO apparently you can whois multiple nicks at a time
        for message in messages:
            if message.command == 'RPL_WHOISUSER':
                ident = message.args[2]
                hostname = message.args[3]
                # args[4] is a literal *
                realname = message.args[5]
            elif message.command == 'RPL_WHOISIDLE':
                # Idle time.  Some servers (at least, inspircd) also have
                # signon time as unixtime.
                idle = timedelta(seconds=int(message.args[2]))
            elif message.command == 'RPL_WHOISCHANNELS':
                # TODO split and parse out the usermodes
                # TODO don't some servers have an extension with multiple modes
                # here
                channels = message.args[2]
            elif message.command == 'RPL_WHOISSERVER':
                server = message.args[2]
                server_desc = message.args[3]


        return messages

    @asyncio.coroutine
    def say(self, message, target, notice=False):
        """Coroutine that sends a message to a target, which may be either a
        `Channel` or a `Peer`.
        """
        command = 'NOTICE' if notice else 'PRIVMSG'
        self.send_message(command, target, message)

    @asyncio.coroutine
    def join(self, channel_name, key=None):
        """Coroutine that joins a channel, and nonblocks until the join is
        "synchronized" (defined as receiving the nick list).
        """
        if channel_name in self._join_futures:
            return self._join_futures[channel_name]

        # TODO multiple?  error on commas?
        if key is None:
            self.send_message('JOIN', channel_name)
        else:
            self.send_message('JOIN', channel_name, key)

        # Clear out any lingering names list
        self._pending_names[channel_name] = []

        # Return a Future, to be populated by the message loop
        fut = self._join_futures[channel_name] = asyncio.Future()
        return fut

    @asyncio.coroutine
    def names(self, channel_name):
        """Coroutine that returns a list of names in a channel."""
        self.send_message('NAMES', channel_name)

        # No need to do the same thing twice
        if channel_name in self._names_futures:
            return self._names_futures[channel_name]

        # Clear out any lingering names list
        self._pending_names[channel_name] = []

        # Return a Future, to be populated by the message loop
        fut = self._names_futures[channel_name] = asyncio.Future()
        return fut

    def set_topic(self, channel, topic):
        """Sets the channel topic."""
        self.send_message('TOPIC', channel, topic)

    # TODO unclear whether this stuff should be separate or what; it's less
    # about the protocol and more about the dywypi interface
    def send_message(self, command, *args):
        message = IRCMessage(command, *args)
        log.debug("sent: %r", message)
        self._writer.write(message.render().encode(self.charset) + b'\r\n')

    def format_transition(self, current_style, new_style):
        if new_style == Style.default():
            # Reset code, ^O
            return '\x0f'

        if new_style.fg != current_style.fg and new_style.fg is Color.default:
            # IRC has no "reset to default" code.  mIRC claims color 99 is for
            # this, but it lies, at least in irssi.  So we must reset and
            # reapply everything.
            ret = '\x0f'
            if new_style.bold is Bold.on:
                ret += '\x02'
            return ret

        ret = ''
        if new_style.fg != current_style.fg:
            ret += FOREGROUND_CODES[new_style.fg]

        if new_style.bold != current_style.bold:
            # There's no on/off for bold, just a toggle
            ret += '\x02'

        return ret
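The client above only exposes events one at a time through read_event(), so a
higher layer is expected to drive the dispatch loop itself.  A minimal sketch
of such a loop follows; client and handle_event are placeholders for
illustration, not names defined in the code above.

import asyncio


@asyncio.coroutine
def run_event_loop(client, handle_event):
    """Pull parsed events off the client's read_queue, one per read_event()."""
    while True:
        event = yield from client.read_event()
        # handle_event is assumed to be a coroutine supplied by the caller.
        yield from handle_event(event)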
Example #58
class DywypiShell(UrwidProtocol):
    """Creates a Twisted-friendly urwid app that allows interacting with dywypi
    via a shell.
    """

    # TODO i don't think client.nick should really be part of the exposed
    # interface; should be a .me returning a peer probably
    # TODO for some reason this is the client even though the thing below is
    # actually called a Client so we should figure this the f**k out
    nick = 'dywypi'

    def __init__(self, loop, network, *args, **kwargs):
        super().__init__(loop, **kwargs)

        self.event_queue = Queue(loop=self.loop)

        self.network = network

        self.me = Peer('dywypi', 'dywypi', 'localhost')
        self.you = Peer('user', 'user', 'localhost')


    def build_toplevel_widget(self):
        self.pane = UnselectableListBox(urwid.SimpleListWalker([]))
        prompt = FancyEdit('>>> ')
        urwid.connect_signal(prompt, 'line_submitted', self.handle_line)

        return urwid.Pile(
            [
                self.pane,
                ('flow', prompt),
            ],
            focus_item=prompt,
        )

    def build_palette(self):
        return [
            ('default', 'default', 'default'),
            ('logging-debug', 'dark gray', 'default'),
            ('logging-info', 'light gray', 'default'),
            ('logging-warning', 'light red', 'default'),
            ('logging-error', 'dark red', 'default'),
            ('logging-critical', 'light magenta', 'default'),
            ('shell-input', 'light gray', 'default'),
            ('bot-output', 'default', 'default'),
            ('bot-output-label', 'dark cyan', 'default'),
        ]

    def unhandled_input(self, key):
        # Try passing the key along to the listbox, so pgup/pgdn still work.
        # Note that this is a Pile method specifically, and requires an index
        # rather than a widget
        # TODO no indication whether we're currently scrolled up.  scroll back
        # to bottom after x seconds with no input?
        listsize = self.widget.get_item_size(
            self.urwid_loop.screen_size, 0, False)
        key = self.pane.keypress(listsize, key)
        if key:
            # `key` gets returned if it wasn't consumed
            self.add_log_line(key)

    def start(self):
        super().start()

        #self.hub.network_connected(self.network, self)

    def add_log_line(self, line, color='default'):
        # TODO generalize this color thing in a way compatible with irc, html, ...
        # TODO i super duper want this for logging, showing incoming/outgoing
        # messages in the right colors, etc!!
        self._print_text((color, line.rstrip()))

    def _print_text(self, *encoded_text):
        self.pane.body.append(urwid.Text(list(encoded_text)))
        self.pane.set_focus(len(self.pane.body) - 1)
        # TODO should this just mark dirty??
        self.urwid_loop.draw_screen()

    def handle_line(self, line):
        """Deal with a line of input."""
        try:
            self._handle_line(line)
        except Exception as e:
            log.exception(e)

    def _handle_line(self, line):
        """All the good stuff happens here.

        Various things happen depending on what the line starts with.

        Colon: This is a command; pretend it was sent as a private message.
        """
        # Whatever it was, log it
        self.pane.body.append(urwid.Text(['>>> ', ('shell-input', line)]))

        if line.startswith(':'):
            command_string = line[1:]

            # TODO rather we didn't need raw_message...
            raw_message = ShellMessage(self.me.name, command_string)
            event = Message(self, raw_message)
            self.event_queue.put_nowait(event)

    def _send_message(self, target, message, as_notice=True):
        # TODO cool color
        self.add_log_line(message)

    @asyncio.coroutine
    def say(self, target, message):
        # TODO target should probably be a peer, eh
        if target == self.you.name:
            prefix = "bot to you: "
        else:
            prefix = "bot to {}: ".format(target)
        self._print_text(('bot-output-label', prefix), ('bot-output', message))

    def format_transition(self, current_style, new_style):
        # TODO wait lol shouldn't this be converting to urwid-style tuples
        if new_style == Style.default():
            # Just use the reset sequence
            return '\x1b[0m'

        ansi_codes = []
        if new_style.fg != current_style.fg:
            ansi_codes.append(FOREGROUND_CODES[new_style.fg])

        if new_style.bold != current_style.bold:
            ansi_codes.append(BOLD_CODES[new_style.bold])

        return '\x1b[' + ';'.join(ansi_codes) + 'm'
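
DywypiShell above only ever put_nowait()s Message events onto its asyncio
Queue (event_queue); something else has to drain it.  A rough consumer sketch,
where dispatch stands in for whatever routing coroutine the surrounding
application provides (it is not defined in the code above):

import asyncio


@asyncio.coroutine
def drain_shell_events(shell, dispatch):
    """Consume events queued by DywypiShell._handle_line."""
    while True:
        # Queue.get() is a coroutine; it waits until an event is available.
        event = yield from shell.event_queue.get()
        yield from dispatch(event)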