Example #1
def test_worker_close(worker):
    loop = asyncio.new_event_loop()
    asyncio.sleep = mock.Mock(wraps=asyncio.coroutine(lambda *a, **kw: None))
    worker.ppid = 1
    worker.pid = 2
    worker.cfg.graceful_timeout = 1.0
    worker.signal = mock.Mock()
    worker.signal.stopped = False
    worker.wsgi = mock.Mock()
    conn = mock.Mock()
    conn.websocket = mock.Mock()
    conn.websocket.close_connection = mock.Mock(
        wraps=asyncio.coroutine(lambda *a, **kw: None)
    )
    worker.connections = set([conn])
    worker.log = mock.Mock()
    worker.loop = loop
    server = mock.Mock()
    server.close = mock.Mock(wraps=lambda *a, **kw: None)
    server.wait_closed = mock.Mock(
        wraps=asyncio.coroutine(lambda *a, **kw: None)
    )
    worker.servers = {server: {"requests_count": 14}}
    worker.max_requests = 10

    # close worker
    _close = asyncio.ensure_future(worker.close(), loop=loop)
    loop.run_until_complete(_close)

    assert worker.signal.stopped
    assert conn.websocket.close_connection.called
    assert len(worker.servers) == 0
Example #2
    def test_invalid_correlation_id(self):
        host, port = self.kafka_host, self.kafka_port

        request = MetadataRequest([])

        # setup connection with mocked reader and writer
        conn = AIOKafkaConnection(host=host, port=port, loop=self.loop)

        # setup reader
        reader = mock.MagicMock()
        int32 = struct.Struct('>i')
        resp = MetadataResponse(brokers=[], topics=[]).encode()
        resp = int32.pack(999) + resp  # set invalid correlation id
        reader.readexactly.side_effect = [
            asyncio.coroutine(lambda *a, **kw: int32.pack(len(resp)))(),
            asyncio.coroutine(lambda *a, **kw: resp)()]
        writer = mock.MagicMock()

        conn._reader = reader
        conn._writer = writer
        # invoke reader task
        # asyncio.async() is a SyntaxError on Python 3.7+; ensure_future is the replacement
        conn._read_task = asyncio.ensure_future(conn._read(), loop=self.loop)

        with self.assertRaises(CorrelationIdError):
            yield from conn.send(request)
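The `asyncio.coroutine(lambda ...)()` calls above produce one-shot awaitables, which is a compact way to feed a `side_effect` list on a mocked stream reader. A minimal standalone sketch of the same pattern, assuming an interpreter that still ships `asyncio.coroutine` (it was removed in Python 3.11); the frame bytes here are made up for illustration:

import asyncio
import struct
from unittest import mock

int32 = struct.Struct('>i')
payload = b"data"

# Each side_effect entry is a ready-made coroutine object, so the code under
# test can await the mocked readexactly() exactly like the real one.
reader = mock.MagicMock()
reader.readexactly.side_effect = [
    asyncio.coroutine(lambda *a, **kw: int32.pack(len(payload)))(),
    asyncio.coroutine(lambda *a, **kw: payload)(),
]

@asyncio.coroutine
def read_frame():
    size, = int32.unpack((yield from reader.readexactly(4)))
    return (yield from reader.readexactly(size))

loop = asyncio.new_event_loop()
assert loop.run_until_complete(read_frame()) == b"data"
loop.close()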
Example #3
def test_json():

    def handler(request):
        assert request.content_type == 'application/json'
        assert 'jso' in request
        return dict(request)

    sd = {'parameters': parameters}
    r = SwaggerValidationRoute(
        'GET', handler=handler, resource=None,
        swagger_data=sd)
    r.build_swagger_data({})
    request = make_mocked_request(
        'POST', '/',
        headers=multidict.CIMultiDict({
            hdrs.CONTENT_TYPE: 'application/json'
        }),
    )
    request.json = asyncio.coroutine(lambda: {'f': ''})
    request._match_info = {}
    resp = yield from r.handler(request)
    assert isinstance(resp, dict), resp
    assert 'road_id' in resp, resp

    # not valid data
    request.json = asyncio.coroutine(lambda: {'f': 1})
    with pytest.raises(web.HTTPBadRequest):
        yield from r.handler(request)

    try:
        yield from r.handler(request)
    except web.HTTPBadRequest as e:
        resp = e

    assert resp.reason['jso.f'], resp.reason
Example #4
def test__run_ok(worker, loop):
    worker.ppid = 1
    worker.alive = True
    worker.servers = {}
    sock = mock.Mock()
    sock.cfg_addr = ('localhost', 8080)
    worker.sockets = [sock]
    worker.wsgi = mock.Mock()
    worker.close = mock.Mock()
    worker.close.return_value = asyncio.Future(loop=loop)
    worker.close.return_value.set_result(())
    worker.log = mock.Mock()
    worker.notify = mock.Mock()
    worker.loop = loop
    ret = asyncio.Future(loop=loop)
    loop.create_server = mock.Mock(
        wraps=asyncio.coroutine(lambda *a, **kw: ret))
    ret.set_result(sock)
    worker.wsgi.make_handler.return_value.num_connections = 1
    worker.cfg.max_requests = 100

    with mock.patch('aiohttp.worker.asyncio') as m_asyncio:
        m_asyncio.sleep = mock.Mock(
            wraps=asyncio.coroutine(lambda *a, **kw: None))
        loop.run_until_complete(worker._run())

    assert worker.notify.called
    assert worker.log.info.called
Example #5
async def test_wait_pong():
    proto = get_wired_protocol()
    node = random_node()
    echoed = "echoed"
    pingid = proto._mkpingid(echoed, node)

    # Schedule a call to proto.recv_pong() simulating a pong from the node we expect.
    recv_pong_coroutine = asyncio.coroutine(lambda: proto.recv_pong(node, echoed))
    asyncio.ensure_future(recv_pong_coroutine())

    got_pong = await proto.wait_pong(pingid)

    assert got_pong
    # Ensure wait_pong() cleaned up after itself.
    assert pingid not in proto.pong_callbacks

    # If the remote node echoed something different from what we expected, wait_pong()
    # would time out.
    wrong_echo = "foo"
    recv_pong_coroutine = asyncio.coroutine(lambda: proto.recv_pong(node, wrong_echo))
    asyncio.ensure_future(recv_pong_coroutine())

    got_pong = await proto.wait_pong(pingid)

    assert not got_pong
    assert pingid not in proto.pong_callbacks
Example #6
    def start(self):
        """
        Start the nyuki.
        The nyuki process terminates when this method returns.
        """
        self.loop.add_signal_handler(SIGTERM, self.abort, SIGTERM)
        self.loop.add_signal_handler(SIGINT, self.abort, SIGINT)

        # Configure services with nyuki's configuration
        log.debug('Running configure for services')
        for name, service in self._services.all.items():
            service.configure(**self._config.get(name, {}))
        log.debug('Done configuring')

        # Start services
        self.loop.run_until_complete(self._services.start())

        # Call for setup
        if not asyncio.iscoroutinefunction(self.setup):
            log.warning('setup method must be a coroutine')
            self.setup = asyncio.coroutine(self.setup)
        self.loop.run_until_complete(self.setup())

        # Main loop
        self.loop.run_forever()

        # Call for teardown
        if not asyncio.iscoroutinefunction(self.teardown):
            log.warning('teardown method must be a coroutine')
            self.teardown = asyncio.coroutine(self.teardown)
        self.loop.run_until_complete(self.teardown())

        # Close everything : terminates nyuki
        self.loop.close()
Example #7
    def test_compacted_topic_consumption(self):
        # Compacted topics can have offsets skipped
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()

        subscriptions = SubscriptionState(loop=self.loop)
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        req = FetchRequest(
            -1,  # replica_id
            100, 100, [(tp.topic, [(tp.partition, 155, 100000)])])

        builder = LegacyRecordBatchBuilder(
            magic=1, compression_type=0, batch_size=99999999)
        builder.append(160, value=b"12345", key=b"1", timestamp=None)
        builder.append(162, value=b"23456", key=b"2", timestamp=None)
        builder.append(167, value=b"34567", key=b"3", timestamp=None)
        batch = bytes(builder.build())

        resp = FetchResponse(
            [('test', [(
                0, 0, 3000,  # partition, error_code, highwater_offset
                batch  # Batch raw bytes
            )])])

        subscriptions.assign_from_user({tp})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(tp)
        client.send.side_effect = asyncio.coroutine(lambda n, r: resp)

        tp_state.seek(155)
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        # Test successful getone, the closest in batch offset=160
        first = buf.getone()
        self.assertEqual(tp_state.position, 161)
        self.assertEqual(
            (first.value, first.key, first.offset),
            (b"12345", b"1", 160))

        # Test successful getmany
        second, third = buf.getall()
        self.assertEqual(tp_state.position, 168)
        self.assertEqual(
            (second.value, second.key, second.offset),
            (b"23456", b"2", 162))
        self.assertEqual(
            (third.value, third.key, third.offset),
            (b"34567", b"3", 167))
Example #8
    async def _test_manager():
        mock.setup_app_manager()
        # check manager role
        view_callable = asyncio.coroutine(get_user)
        info = await view_callable(mock.app)
        assert info.status == 200
        info_decoded = jwt.decode(info.body, secret)
        assert info_decoded['result']['roles'] == {'site administrator': 1}

        # check manager can assign roles
        mock.params['scope'] = mock.scope
        mock.params['user'] = mock.new_user
        mock.params['roles'] = ['Editor']
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(grant_user_scope_roles)
        info = await view_callable(mock.app)
        assert info.status in (200, 400)
        info_decoded = jwt.decode(info.body, secret)
        assert info_decoded['result'] in ('success', 'attributeOrValueExists')

        # check manager can get user and check added roles
        mock.params['user'] = mock.new_user
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(get_user)
        info = await view_callable(mock.app)
        assert info.status == 200
        info_decoded = jwt.decode(info.body, secret)
        assert info_decoded['result']['roles'] == {'Editor': 1}
Example #9
async def test_wait_ping():
    proto = get_wired_protocol()
    node = random_node()
    echo = "echo"

    # Schedule a call to proto.recv_ping() simulating a ping from the node we expect.
    recv_ping_coroutine = asyncio.coroutine(lambda: proto.recv_ping(node, echo))
    asyncio.ensure_future(recv_ping_coroutine())

    got_ping = await proto.wait_ping(node)

    assert got_ping
    # Ensure wait_ping() cleaned up after itself.
    assert node not in proto.ping_callbacks

    # If we waited for a ping from a different node, wait_ping() would time out and
    # thus return False.
    recv_ping_coroutine = asyncio.coroutine(lambda: proto.recv_ping(node, echo))
    asyncio.ensure_future(recv_ping_coroutine())

    node2 = random_node()
    got_ping = await proto.wait_ping(node2)

    assert not got_ping
    assert node2 not in proto.ping_callbacks
Example #10
    async def _test_add_scope():
        # add scope by superadmin
        mock.setup_app_superuser()
        mock.params['scope'] = 'nou_test'
        mock.params['admin_user'] = '******'
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(add_scope)
        info = await view_callable(mock.app)
        assert info.status in (200, 400)
        info_decoded = jwt.decode(info.body, secret)
        assert info_decoded['result'] in ('success', 'entryAlreadyExists')

        # add an already-existing scope
        mock.params['scope'] = 'nou_test'
        mock.params['admin_user'] = '******'
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(add_scope)
        info = await view_callable(mock.app)
        assert info.status == 400
        info_decoded = jwt.decode(info.body, secret)
        assert info_decoded['result'] == 'entryAlreadyExists'

        # add scope as a non-superadmin user
        mock.setup_app_user()
        mock.params['scope'] = 'nou_test_2'
        mock.params['admin_user'] = '******'
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(add_scope)
        with unittest.TestCase().assertRaises(
            HTTPBadRequest, msg='NOT VALID token: must be superuser'):
            info = await view_callable(mock.app)
Example #11
    async def _test_search_user():
        mock.setup_app_client()
        mock.params['criteria'] = '{"mail": "'+mock.user+'"}'
        mock.params['exact_match'] = 'True'
        mock.params['attrs'] = '["mail"]'
        mock.params['page'] = '0'
        mock.params['num_x_page'] = '30'
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(search_user)
        info = await view_callable(mock.app)
        assert info.status == 200

        # search user with empty criteria and without exact_match
        mock.params['criteria'] = '{"displayName": ""}'
        mock.params['exact_match'] = None
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(search_user)
        info = await view_callable(mock.app)
        assert info.status == 200

        # search all users in scope
        mock.params['criteria'] = '{}'
        mock.params['exact_match'] = None
        mock.app = mock.app._replace(post=payload(mock.params), headers=mock.headers)
        view_callable = asyncio.coroutine(search_user)
        info = await view_callable(mock.app)
        assert info.status == 200
        info_decoded = jwt.decode(info.body, secret)
Example #12
    def _mock_call(_mock_self, *args, **kwargs):
        try:
            result = super()._mock_call(*args, **kwargs)
        except StopIteration as e:
            side_effect = _mock_self.side_effect
            if side_effect is not None and not callable(side_effect):
                raise

            result = asyncio.coroutine(_raise)(e)
        except BaseException as e:
            result = asyncio.coroutine(_raise)(e)

        _call = _mock_self.call_args

        @asyncio.coroutine
        def proxy():
            try:
                if inspect.isawaitable(result):
                    return (yield from result)
                else:
                    return result
            finally:
                _mock_self.await_count += 1
                _mock_self.await_args = _call
                _mock_self.await_args_list.append(_call)
                yield from _mock_self.awaited._notify()

        return proxy()
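A self-contained sketch of the proxy idea above: whatever result `_mock_call` produced is wrapped in a coroutine so the caller can always await it, and the await is recorded on the way out. The names `awaited_proxy` and `stats` are illustrative, not part of the original mock class:

import asyncio
import inspect

def awaited_proxy(result, stats):
    @asyncio.coroutine
    def proxy():
        try:
            if inspect.isawaitable(result):
                return (yield from result)
            return result
        finally:
            stats['await_count'] += 1   # bookkeeping, as in _mock_call above
    return proxy()

stats = {'await_count': 0}
loop = asyncio.new_event_loop()
assert loop.run_until_complete(awaited_proxy(42, stats)) == 42
assert stats['await_count'] == 1
loop.close()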
Example #13
    def test_correlation_id_on_group_coordinator_req(self):
        host, port = self.kafka_host, self.kafka_port

        request = GroupCoordinatorRequest(consumer_group='test')

        # setup connection with mocked reader and writer
        conn = AIOKafkaConnection(host=host, port=port, loop=self.loop)

        # setup reader
        reader = mock.MagicMock()
        int32 = struct.Struct('>i')
        resp = GroupCoordinatorResponse(
            error_code=0, coordinator_id=22,
            host='127.0.0.1', port=3333).encode()
        resp = int32.pack(0) + resp  # set correlation id to 0
        reader.readexactly.side_effect = [
            asyncio.coroutine(lambda *a, **kw: int32.pack(len(resp)))(),
            asyncio.coroutine(lambda *a, **kw: resp)()]
        writer = mock.MagicMock()

        conn._reader = reader
        conn._writer = writer
        # invoke reader task
        # asyncio.async() is a SyntaxError on Python 3.7+; ensure_future is the replacement
        conn._read_task = asyncio.ensure_future(conn._read(), loop=self.loop)

        response = yield from conn.send(request)
        self.assertIsInstance(response, GroupCoordinatorResponse)
        self.assertEqual(response.error_code, 0)
        self.assertEqual(response.coordinator_id, 22)
        self.assertEqual(response.host, '127.0.0.1')
        self.assertEqual(response.port, 3333)
Example #14
    def _test_say_hello_service():
        dummy = app
        # We get the service token
        params = {}
        params['grant_type'] = 'service'
        params['client_id'] = 'plone'
        params['client_secret'] = 'plone'
        headers = {'User-Agent': 'DUMMY', 'Host': '127.0.0.1:8080'}
        dummy = dummy._replace(post=payload(params), headers=headers)

        view_callable = asyncio.coroutine(get_token)
        info = yield from view_callable(dummy)
        assert info.status == 200

        info_decoded = jwt.decode(info.body, secret)
        service_token = info_decoded['service_token']

        # We get the authorize code
        params = {}
        params['response_type'] = 'code'
        params['client_id'] = 'plone'
        params['service_token'] = service_token
        params['scopes'] = ['plone']
        dummy = app._replace(post=payload(params))

        view_callable = asyncio.coroutine(get_authorization_code)
        info = yield from view_callable(dummy)
        assert info.status == 200

        info_decoded = jwt.decode(info.body, secret)

        # We get the working token for the client app
        params = {}
        params['grant_type'] = 'user'
        params['code'] = info_decoded['auth_code']
        params['username'] = '******'
        params['password'] = '******'
        params['scopes'] = ['plone']
        params['client_id'] = 'plone'
        headers = {'User-Agent': 'DUMMY', 'Host': '127.0.0.1:8080'}
        dummy = app._replace(post=payload(params), headers=headers)

        view_callable = asyncio.coroutine(get_token)
        info = yield from view_callable(dummy)
        assert info.status == 200

        info_decoded = jwt.decode(info.body, secret)
        user_token = info_decoded['token']

        params = {'code': service_token, 'token': user_token}
        dummy = app._replace(post=payload(params))

        view_callable = asyncio.coroutine(valid_token)
        info = yield from view_callable(dummy)
        assert info.status == 200

        info_decoded = jwt.decode(info.body, secret)
        assert info_decoded['user'] == '*****@*****.**'
Example #15
    async def test_handle_batch(self):
        """
        Tests that the handle method accepts a batch of requests
        """

        req = json.dumps(
            [
                {
                    'jsonrpc': '2.0',
                    'method':  'testification',
                    'params':  {'test': 'value'},
                    'id':      10,
                },
                {
                    'jsonrpc': '2.0',
                    'method':  'notify',
                },
            ]
        )

        mock_testification_handler = unittest.mock.Mock()
        mock_testification_handler.return_value = {'Mishief': 'managed'}

        mock_notify_handler = unittest.mock.Mock()
        mock_notify_handler.return_value = [1, 2, 3]

        dispatcher = Dispatcher(
            methods = {
                'testification': asyncio.coroutine(mock_testification_handler),
                'notify':        asyncio.coroutine(mock_notify_handler),
            },
        )

        resp = await JsonRpcResponseManager.handle(req, dispatcher)

        self.assertIsInstance(
            resp,
            JsonRpcBatchResponse,
            'Response was not the right type',
        )

        self.assertListEqual(
            [10],
            [r.response_id for r in resp],
            'Response ID\'s did not match',
        )

        self.assertListEqual(
            [{'Mishief': 'managed'}],
            [r.result for r in resp],
            'Response result did not match',
        )

        mock_testification_handler.assert_called_with(test = 'value')

        mock_notify_handler.assert_called_with()
Example #16
 def test_match_second_result_in_table(self):
     handler1 = asyncio.coroutine(lambda req: Response(req))
     handler2 = asyncio.coroutine(lambda req: Response(req))
     self.router.add_route('GET', '/h1', handler1)
     self.router.add_route('POST', '/h2', handler2)
     req = self.make_request('POST', '/h2')
     info = self.loop.run_until_complete(self.router.resolve(req))
     self.assertIsNotNone(info)
     self.assertEqual({}, info)
     self.assertIs(handler2, info.handler)
Example #17
 def _get_transaction(self, txid, parent_block=None):
     meta = {'parent_block': parent_block}
     if self.async:  # note: 'async' became a reserved word in Python 3.7, so this code targets older interpreters
         return chain(txid,
                      lambda txid: self.btcd.get_raw_transaction(txid, async=True),
                      lambda rawtx: self.btcd.decode_raw_transaction(rawtx.get('result'), async=True),
                      asyncio.coroutine(lambda json_obj: BTCDTransaction(json_obj.get('result'),
                                                                         meta=meta)))
     else:
         rawtx = self.btcd.get_raw_transaction(txid)
         json_obj = self.btcd.decode_raw_transaction(rawtx.get('result'))
         return BTCDTransaction(json_obj.get('result'))
Example #18
def add_route(app, fn):
    method = getattr(fn, "__method__", None)  # get fn.__method__, or None if missing
    path = getattr(fn, "__route__", None)
    # Without an HTTP method or a path the handler cannot be registered, so fail early
    if path is None or method is None:
        raise ValueError("@get or @post not defined in %s" % str(fn))
    # Turn a function that is neither a coroutine nor a generator into a coroutine
    if not asyncio.iscoroutinefunction(fn) and not inspect.isgeneratorfunction(fn):
        fn = asyncio.coroutine(fn)
    logging.info("add route %s %s => %s(%s)" % (
        method, path, fn.__name__,
        '.'.join(inspect.signature(fn).parameters.keys())))
    # Register the request handler
    app.router.add_route(method, path, RequestHandler(app, fn))
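For context, the `__method__` and `__route__` attributes that `add_route()` reads are typically attached by small marker decorators. A hedged sketch of such a decorator (the original project's `get`/`post` may differ in detail):

import functools

def get(path):
    """Mark a function as the GET handler for `path`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            return func(*args, **kw)
        wrapper.__method__ = 'GET'    # read back by add_route()
        wrapper.__route__ = path
        return wrapper
    return decorator

# A @post decorator is identical except for __method__ = 'POST'.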
Example #19
async def test_add_msg_w_response(loop):
    app = BaseApplication('app', loop=loop, CONFIG='config.tests')
    qp = AsyncQueueProducer(app)
    qp.add_msg_to_queue = Mock(side_effect=coroutine(
        lambda q, m: "24d8f597g8utr"
    ))
    qp.get_message_from_queue = Mock(side_effect=coroutine(
        lambda cid: "response"
    ))

    await qp.add_msg_to_queue_with_response('queue1', "qweqwe")
    assert qp.add_msg_to_queue.called
    assert qp.get_message_from_queue.called
Example #20
def test_event():
    e = event.Event('MyEvent')
    res = []
    a = asyncio.coroutine(lambda arg: res.append('a' + arg))
    b = asyncio.coroutine(lambda arg: res.append('b' + arg))
    e.add_observer(a)
    yield from e.fire('1')
    e.add_observer(b)
    yield from e.fire('2')
    e.remove_observer(a)
    yield from e.fire('3')
    e.remove_observer(b)
    yield from e.fire('4')
    assert res == ['a1', 'a2', 'b2', 'b3']
Example #22
    def fire(self, event, *args, **kwargs):
        for each in self.get_listeners(event):
            each = asyncio.coroutine(each)
            yield from each(*args, **kwargs)

        for observer, prefix in self._observers.items():
            l = getattr(observer, prefix + str(event).lower(), False)
            if l and callable(l):
                l = asyncio.coroutine(l)
                yield from l(*args, **kwargs)

        for each in self.get_catch_all_listeners():
            each = asyncio.coroutine(each)
            yield from each(event, *args, **kwargs)
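Because `asyncio.coroutine()` returns coroutine functions unchanged and wraps plain callables, `fire()` lets synchronous and asynchronous listeners coexist. A standalone sketch of that upgrade step (Python < 3.11):

import asyncio

heard = []

def sync_listener(msg):            # a plain function
    heard.append('sync:' + msg)

@asyncio.coroutine
def async_listener(msg):           # already a coroutine function
    heard.append('async:' + msg)

@asyncio.coroutine
def fire(listeners, *args):
    for each in listeners:
        each = asyncio.coroutine(each)   # no-op for async_listener
        yield from each(*args)

loop = asyncio.new_event_loop()
loop.run_until_complete(fire([sync_listener, async_listener], 'ping'))
assert heard == ['sync:ping', 'async:ping']
loop.close()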
Example #23
 def __new__(mcs, name, bases, clsdict):
     for key, value in clsdict.items():
         if callable(value) and (value.__name__.startswith("on_") or
                                 hasattr(value, "_command")):
             clsdict[key] = asyncio.coroutine(value)
     c = type.__new__(mcs, name, bases, clsdict)
     return c
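A usage sketch of the metaclass above, trimmed to the `on_` prefix check; `Bot` and `on_message` are illustrative names:

import asyncio

class CoroutineMeta(type):
    def __new__(mcs, name, bases, clsdict):
        for key, value in clsdict.items():
            if callable(value) and key.startswith("on_"):
                clsdict[key] = asyncio.coroutine(value)   # replace method in place
        return type.__new__(mcs, name, bases, clsdict)

class Bot(metaclass=CoroutineMeta):
    def on_message(self, text):    # written as a plain method...
        return text.upper()

loop = asyncio.new_event_loop()
# ...but awaitable after class creation, thanks to the metaclass
assert loop.run_until_complete(Bot().on_message("hi")) == "HI"
loop.close()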
Example #24
def wraps_with_context(func, context):
    """Return a wrapped partial(func, context)"""
    wrapped = functools.partial(func, context)
    wrapped = functools.wraps(func)(wrapped)
    if asyncio.iscoroutinefunction(func):
        wrapped = asyncio.coroutine(wrapped)
    return wrapped
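A usage sketch for `wraps_with_context()`, with the helper repeated so the block runs standalone (Python < 3.11):

import asyncio
import functools

def wraps_with_context(func, context):
    """Return a wrapped partial(func, context)"""
    wrapped = functools.partial(func, context)
    wrapped = functools.wraps(func)(wrapped)
    if asyncio.iscoroutinefunction(func):
        wrapped = asyncio.coroutine(wrapped)
    return wrapped

@asyncio.coroutine
def greet(context, name):
    return '%s, %s!' % (context, name)

loop = asyncio.new_event_loop()
bound = wraps_with_context(greet, 'Hello')   # context is baked in
assert loop.run_until_complete(bound('world')) == 'Hello, world!'
loop.close()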
Example #25
 def __init__(self,
              coro: Callable[..., Awaitable[None]],
              args: Tuple[Any, ...],
              kwargs: Dict[str, Any]) -> None:
     self.coro = asyncio.coroutine(coro)
     self.args = args
     self.kwargs = kwargs
Example #26
    def __init__(self, method, handler, *,
                 expect_handler=None,
                 resource=None):

        if expect_handler is None:
            expect_handler = _defaultExpectHandler

        assert asyncio.iscoroutinefunction(expect_handler), \
            'Coroutine is expected, got {!r}'.format(expect_handler)

        method = upstr(method)
        if method not in self.METHODS:
            raise ValueError("{} is not allowed HTTP method".format(method))

        assert callable(handler), handler
        if asyncio.iscoroutinefunction(handler):
            pass
        elif inspect.isgeneratorfunction(handler):
            warnings.warn("Bare generators are deprecated, "
                          "use @coroutine wrapper", DeprecationWarning)
        elif (isinstance(handler, type) and
              issubclass(handler, AbstractView)):
            pass
        else:
            handler = asyncio.coroutine(handler)

        self._method = method
        self._handler = handler
        self._expect_handler = expect_handler
        self._resource = resource
Example #27
        def wrapped(self, *args, **kwargs):
            if timeout is None:
                actual_timeout = get_async_test_timeout()
            else:
                actual_timeout = get_async_test_timeout(timeout)

            coro_exc = None

            def exc_handler(loop, context):
                nonlocal coro_exc
                coro_exc = context['exception']

                # Raise CancelledError from run_until_complete below.
                task.cancel()

            self.loop.set_exception_handler(exc_handler)
            coro = asyncio.coroutine(f)(self, *args, **kwargs)
            coro = asyncio.wait_for(coro, actual_timeout, loop=self.loop)
            task = ensure_future(coro, loop=self.loop)
            try:
                self.loop.run_until_complete(task)
            except:
                if coro_exc:
                    # Raise the error thrown in on_timeout, with only the
                    # traceback from the coroutine itself, not from
                    # run_until_complete.
                    raise coro_exc from None

                raise
Example #28
    def wrapper(func):
        is_coro = asyncio.iscoroutinefunction(func)
        if not is_coro:
            func = asyncio.coroutine(func)

        decorated_coro = decorator(func)
        assert asyncio.iscoroutinefunction(decorated_coro)

        if is_coro:
            return decorated_coro
        else:
            # Unwrap the coroutine. We know it should never yield.
            @functools.wraps(decorated_coro, assigned=functools.WRAPPER_ASSIGNMENTS + EXTRA_PARAMS, updated=())
            def decorated_func(*args, **kwargs):
                x = iter(decorated_coro(*args, **kwargs))
                try:
                    next(x)
                except StopIteration as e:
                    return e.value
                else:
                    raise Exception(
                        "Decorator %s behaving badly wrapping non-coroutine %s" % (decorator.__name__, func.__name__)
                    )

            return decorated_func
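A condensed, runnable sketch of the same dual-mode trick, with an illustrative pass-through `logged` decorator standing in for the `decorator` argument (the EXTRA_PARAMS handling is omitted). A synchronous function stays synchronous for its callers because the wrapping coroutine is driven by hand and is expected to finish on its first `next()`:

import asyncio
import functools

def logged(func):
    # stand-in decorator: must accept and return a coroutine function
    @asyncio.coroutine
    def decorated(*args, **kwargs):
        return (yield from func(*args, **kwargs))
    return decorated

def wrapper(func):
    is_coro = asyncio.iscoroutinefunction(func)
    if not is_coro:
        func = asyncio.coroutine(func)
    decorated_coro = logged(func)
    if is_coro:
        return decorated_coro

    @functools.wraps(decorated_coro)
    def decorated_func(*args, **kwargs):
        x = iter(decorated_coro(*args, **kwargs))
        try:
            next(x)                      # should raise StopIteration at once
        except StopIteration as e:
            return e.value
        raise Exception("decorator yielded while wrapping a non-coroutine")
    return decorated_func

def add(a, b):
    return a + b

assert wrapper(add)(1, 2) == 3           # sync in, sync out: no event loop used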
Example #29
    def outter(old_func):
        func = asyncio.coroutine(old_func)
        @asyncio.coroutine
        @wraps(func)
        def inner(protocol, plugin, future):
            while True:
                if future.cancelled():
                    break
                cfg = protocol.config['plugin:%s' % plugin.name]
                time_to_sleep = cfg.getint(time_config_key, default)
                if time_to_sleep == 0:
                    break

                if protocol.ready:
                    # special random delay
                    yield from asyncio.sleep(random() * 3)
                    try:
                        yield from func(protocol, plugin, cfg)
                    except:
                        traceback.print_exc(file=sys.stdout)


                yield from asyncio.sleep(time_to_sleep)
        inner.is_loop = True
        return inner
Example #30
            def run_in_loop(s_args, s_kwargs, c_args, c_kwargs):

                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(None)

                server = aioftp.Server(*s_args, loop=loop, **s_kwargs)
                client = aioftp.Client(*c_args, loop=loop, **c_kwargs)

                coro = asyncio.coroutine(f)
                try:

                    loop.run_until_complete(coro(loop, client, server))

                finally:

                    if hasattr(server, "server"):

                        server.close()
                        loop.run_until_complete(server.wait_closed())

                    if hasattr(client, "writer"):

                        client.close()

                    loop.close()
Example #31
def _ensure_coroutine(coro_or_func):
    if asyncio.iscoroutinefunction(coro_or_func):
        return coro_or_func
    else:
        return asyncio.coroutine(coro_or_func)
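Usage sketch for `_ensure_coroutine()` (helper repeated so the block runs standalone; Python < 3.11): callers may register either plain functions or coroutine functions and always get something awaitable back:

import asyncio

def _ensure_coroutine(coro_or_func):
    if asyncio.iscoroutinefunction(coro_or_func):
        return coro_or_func
    return asyncio.coroutine(coro_or_func)

def plain(x):
    return x * 2

@asyncio.coroutine
def coro(x):
    return x * 2

loop = asyncio.new_event_loop()
assert loop.run_until_complete(_ensure_coroutine(plain)(21)) == 42   # wrapped
assert loop.run_until_complete(_ensure_coroutine(coro)(21)) == 42    # passed through
loop.close()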
Example #32
 def wrapper(*args, **kwargs):
     coro = asyncio.coroutine(f)
     future = coro(*args, **kwargs)
     loop = asyncio.get_event_loop()
     loop.run_until_complete(future)
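This wrapper is the classic "run an async test synchronously" decorator. A hedged, self-contained sketch of how it is typically applied; the name `run_async` is illustrative:

import asyncio
import functools

def run_async(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        coro = asyncio.coroutine(f)      # accepts plain and generator functions
        future = coro(*args, **kwargs)
        loop = asyncio.get_event_loop()
        loop.run_until_complete(future)
    return wrapper

@run_async
def test_sleep_then_add():
    yield from asyncio.sleep(0)
    assert 1 + 1 == 2

test_sleep_then_add()    # runs to completion, no loop plumbing inside the test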
Example #33
class DataQueue:
    """DataQueue is a general-purpose blocking queue with one reader."""
    def __init__(self, *, loop=None):
        self._loop = loop
        self._eof = False
        self._waiter = None
        self._exception = None
        self._size = 0
        self._buffer = collections.deque()

    def __len__(self):
        return len(self._buffer)

    def is_eof(self):
        return self._eof

    def at_eof(self):
        return self._eof and not self._buffer

    def exception(self):
        return self._exception

    def set_exception(self, exc):
        self._eof = True
        self._exception = exc

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.done():
                waiter.set_exception(exc)

    def feed_data(self, data, size=0):
        self._size += size
        self._buffer.append((data, size))

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(True)

    def feed_eof(self):
        self._eof = True

        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(False)

    @asyncio.coroutine
    def read(self):
        if not self._buffer and not self._eof:
            assert not self._waiter
            self._waiter = helpers.create_future(self._loop)
            try:
                yield from self._waiter
            except (asyncio.CancelledError, asyncio.TimeoutError):
                self._waiter = None
                raise

        if self._buffer:
            data, size = self._buffer.popleft()
            self._size -= size
            return data
        else:
            if self._exception is not None:
                raise self._exception
            else:
                raise EofStream

    if helpers.PY_35:

        def __aiter__(self):
            return AsyncStreamIterator(self.read)

        if not helpers.PY_352:  # pragma: no cover
            __aiter__ = asyncio.coroutine(__aiter__)
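A usage sketch for DataQueue, assuming the class above is importable, that `helpers.create_future(loop)` behaves like `loop.create_future()`, and that `EofStream` is the module's end-of-stream exception:

import asyncio

@asyncio.coroutine
def demo(loop):
    q = DataQueue(loop=loop)
    q.feed_data(b"chunk", size=5)   # would wake a waiting reader
    q.feed_eof()
    first = yield from q.read()     # -> b"chunk"
    try:
        yield from q.read()         # buffer drained and EOF set
    except EofStream:
        return first

loop = asyncio.new_event_loop()
assert loop.run_until_complete(demo(loop)) == b"chunk"
loop.close()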
Example #34
class ResultProxy:
    """Wraps a DB-API cursor object to provide easier access to row columns.

    Individual columns may be accessed by their integer position,
    case-insensitive column name, or by sqlalchemy schema.Column
    object. e.g.:

      row = fetchone()

      col1 = row[0]    # access via integer position

      col2 = row['col2']   # access via name

      col3 = row[mytable.c.mycol] # access via Column object.

    ResultProxy also handles post-processing of result column
    data using sqlalchemy TypeEngine objects, which are referenced from
    the originating SQL statement that produced this result set.
    """
    def __init__(self, connection, cursor, dialect, result_map=None):
        self._dialect = dialect
        self._closed = False
        self._result_map = result_map
        self._cursor = cursor
        self._connection = connection
        self._rowcount = cursor.rowcount

        if cursor.description is not None:
            self._metadata = ResultMetaData(self, cursor.description)
            self._weak = weakref.ref(self, lambda wr: cursor.close())
        else:
            self._metadata = None
            self.close()
            self._weak = None

    @property
    def dialect(self):
        """SQLAlchemy dialect."""
        return self._dialect

    @property
    def cursor(self):
        return self._cursor

    def keys(self):
        """Return the current set of string keys for rows."""
        if self._metadata:
            return tuple(self._metadata.keys)
        else:
            return ()

    @property
    def rowcount(self):
        """Return the 'rowcount' for this result.

        The 'rowcount' reports the number of rows *matched*
        by the WHERE criterion of an UPDATE or DELETE statement.

        .. note::

           Notes regarding .rowcount:


           * This attribute returns the number of rows *matched*,
             which is not necessarily the same as the number of rows
             that were actually *modified* - an UPDATE statement, for example,
             may have no net change on a given row if the SET values
             given are the same as those present in the row already.
             Such a row would be matched but not modified.

           * .rowcount is *only* useful in conjunction
             with an UPDATE or DELETE statement.  Contrary to what the Python
             DBAPI says, it does *not* return the
             number of rows available from the results of a SELECT statement
             as DBAPIs cannot support this functionality when rows are
             unbuffered.

           * Statements that use RETURNING may not return a correct
             rowcount.
        """
        return self._rowcount

    @property
    def returns_rows(self):
        """True if this ResultProxy returns rows.

        I.e. if it is legal to call the methods .fetchone(),
        .fetchmany() and .fetchall().
        """
        return self._metadata is not None

    @property
    def closed(self):
        return self._closed

    def close(self):
        """Close this ResultProxy.

        Closes the underlying DBAPI cursor corresponding to the execution.

        Note that any data cached within this ResultProxy is still available.
        For some types of results, this may include buffered rows.

        If this ResultProxy was generated from an implicit execution,
        the underlying Connection will also be closed (returns the
        underlying DBAPI connection to the connection pool.)

        This method is called automatically when:

        * all result rows are exhausted using the fetchXXX() methods.
        * cursor.description is None.
        """

        if not self._closed:
            self._closed = True
            self._cursor.close()
            # allow consistent errors
            self._cursor = None
            self._weak = None

    def __iter__(self):
        warnings.warn("Iteration over ResultProxy is deprecated",
                      DeprecationWarning,
                      stacklevel=2)
        while True:
            row = yield from self.fetchone()
            if row is None:
                return
            else:
                yield row

    if PY_35:  # pragma: no branch

        def __aiter__(self):
            return self

        if not PY_352:
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):
            ret = yield from self.fetchone()
            if ret is not None:
                return ret
            else:
                raise StopAsyncIteration

    def _non_result(self):
        if self._metadata is None:
            raise exc.ResourceClosedError(
                "This result object does not return rows. "
                "It has been closed automatically.")
        else:
            raise exc.ResourceClosedError("This result object is closed.")

    def _process_rows(self, rows):
        process_row = RowProxy
        metadata = self._metadata
        keymap = metadata._keymap
        processors = metadata._processors
        return [process_row(metadata, row, processors, keymap) for row in rows]

    @asyncio.coroutine
    def fetchall(self):
        """Fetch all rows, just like DB-API cursor.fetchall()."""
        try:
            rows = yield from self._cursor.fetchall()
        except AttributeError:
            self._non_result()
        else:
            res = self._process_rows(rows)
            self.close()
            return res

    @asyncio.coroutine
    def fetchone(self):
        """Fetch one row, just like DB-API cursor.fetchone().

        If a row is present, the cursor remains open after this is called.
        Else the cursor is automatically closed and None is returned.
        """
        try:
            row = yield from self._cursor.fetchone()
        except AttributeError:
            self._non_result()
        else:
            if row is not None:
                return self._process_rows([row])[0]
            else:
                self.close()
                return None

    @asyncio.coroutine
    def fetchmany(self, size=None):
        """Fetch many rows, just like DB-API
        cursor.fetchmany(size=cursor.arraysize).

        If rows are present, the cursor remains open after this is called.
        Else the cursor is automatically closed and an empty list is returned.
        """
        try:
            if size is None:
                rows = yield from self._cursor.fetchmany()
            else:
                rows = yield from self._cursor.fetchmany(size)
        except AttributeError:
            self._non_result()
        else:
            res = self._process_rows(rows)
            if len(res) == 0:
                self.close()
            return res

    @asyncio.coroutine
    def first(self):
        """Fetch the first row and then close the result set unconditionally.

        Returns None if no row is present.
        """
        if self._metadata is None:
            self._non_result()
        try:
            return (yield from self.fetchone())
        finally:
            self.close()

    @asyncio.coroutine
    def scalar(self):
        """Fetch the first column of the first row, and close the result set.

        Returns None if no row is present.
        """
        row = yield from self.first()
        if row is not None:
            return row[0]
        else:
            return None
Example #35
def _decorate_coroutine_callable(func, new_patching):
    if hasattr(func, 'patchings'):
        func.patchings.append(new_patching)
        return func

    # Python 3.5 returns True for is_generator_func(new_style_coroutine) if
    # there is an "await" statement in the function body, which is wrong. It is
    # fixed in 3.6, but I can't find which commit fixes this.
    # The only way to work correctly with 3.5 and 3.6 seems to use
    # inspect.iscoroutinefunction()
    is_generator_func = inspect.isgeneratorfunction(func)
    is_coroutine_func = asyncio.iscoroutinefunction(func)
    try:
        is_native_coroutine_func = inspect.iscoroutinefunction(func)
    except AttributeError:
        is_native_coroutine_func = False

    if not (is_generator_func or is_coroutine_func):
        return None

    patchings = [new_patching]

    def patched_factory(*args, **keywargs):
        extra_args = []
        patchers_to_exit = []
        patch_dict_with_limited_scope = []

        exc_info = tuple()
        try:
            for patching in patchings:
                arg = patching.__enter__()
                if patching.scope == LIMITED:
                    patchers_to_exit.append(patching)
                if isinstance(patching, _patch_dict):
                    if patching.scope == GLOBAL:
                        for limited_patching in patch_dict_with_limited_scope:
                            if limited_patching.in_dict is patching.in_dict:
                                limited_patching._keep_global_patch(patching)
                    else:
                        patch_dict_with_limited_scope.append(patching)
                else:
                    if patching.attribute_name is not None:
                        keywargs.update(arg)
                        if patching.new is DEFAULT:
                            patching.new = arg[patching.attribute_name]
                    elif patching.new is DEFAULT:
                        patching.mock_to_reuse = arg
                        extra_args.append(arg)

            args += tuple(extra_args)
            gen = func(*args, **keywargs)
            return _PatchedGenerator(gen, patchings,
                                     asyncio.iscoroutinefunction(func))
        except:
            if patching not in patchers_to_exit and _is_started(patching):
                # the patcher may have been started, but an exception
                # raised whilst entering one of its additional_patchers
                patchers_to_exit.append(patching)
            # Pass the exception to __exit__
            exc_info = sys.exc_info()
            # re-raise the exception
            raise
        finally:
            for patching in reversed(patchers_to_exit):
                patching.__exit__(*exc_info)

    # wrap the factory in a native coroutine or a generator to respect
    # introspection.
    if is_native_coroutine_func:
        # inspect.iscoroutinefunction() returns True
        patched = _awaitable.make_native_coroutine(patched_factory)
    elif is_generator_func:
        # inspect.isgeneratorfunction() returns True
        def patched_generator(*args, **kwargs):
            return (yield from patched_factory(*args, **kwargs))

        patched = patched_generator

        if is_coroutine_func:
            # asyncio.iscoroutinefunction() returns True
            patched = asyncio.coroutine(patched)
    else:
        patched = patched_factory

    patched.patchings = patchings
    return functools.wraps(func)(patched)
Example #36
 def __init__(self, func):  # Initialization with a URL handler
     self._func = asyncio.coroutine(func)
Example #37
    def test_returns_coroutine_from_side_effect_being_a_coroutine(self, klass):
        mock = klass()
        mock.side_effect = asyncio.coroutine(lambda: 'ProbeValue')

        self.assertEqual('ProbeValue', run_coroutine(mock()))
Example #38
class AIOKafkaConsumer(object):
    """
    A client that consumes records from a Kafka cluster.

    The consumer will transparently handle the failure of servers in the Kafka
    cluster, and adapt as topic-partitions are created or migrate between
    brokers. It also interacts with the assigned kafka Group Coordinator node
    to allow multiple consumers to load balance consumption of topics (feature
    of kafka >= 0.9.0.0).

    .. _create_connection:
        https://docs.python.org/3/library/asyncio-eventloop.html\
        #creating-connections

    Arguments:
        *topics (str): optional list of topics to subscribe to. If not set,
            call subscribe() or assign() before consuming records. Passing
            topics directly is same as calling ``subscribe()`` API.
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the consumer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client. Also
            submitted to GroupCoordinator for logging with respect to
            consumer group administration. Default: 'aiokafka-{version}'
        group_id (str or None): name of the consumer group to join for dynamic
            partition assignment (if enabled), and to use for fetching and
            committing offsets. If None, auto-partition assignment (via
            group coordinator) and offset commits are disabled.
            Default: None
        key_deserializer (callable): Any callable that takes a
            raw message key and returns a deserialized key.
        value_deserializer (callable, optional): Any callable that takes a
            raw message value and returns a deserialized value.
        fetch_min_bytes (int): Minimum amount of data the server should
            return for a fetch request, otherwise wait up to
            fetch_max_wait_ms for more data to accumulate. Default: 1.
        fetch_max_bytes (int): The maximum amount of data the server should
            return for a fetch request. This is not an absolute maximum, if
            the first message in the first non-empty partition of the fetch
            is larger than this value, the message will still be returned
            to ensure that the consumer can make progress. NOTE: consumer
            performs fetches to multiple brokers in parallel so memory
            usage will depend on the number of brokers containing
            partitions for the topic.
            Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 Mb).
        fetch_max_wait_ms (int): The maximum amount of time in milliseconds
            the server will block before answering the fetch request if
            there isn't sufficient data to immediately satisfy the
            requirement given by fetch_min_bytes. Default: 500.
        max_partition_fetch_bytes (int): The maximum amount of data
            per-partition the server will return. The maximum total memory
            used for a request = #partitions * max_partition_fetch_bytes.
            This size must be at least as large as the maximum message size
            the server allows or else it is possible for the producer to
            send messages larger than the consumer can fetch. If that
            happens, the consumer can get stuck trying to fetch a large
            message on a certain partition. Default: 1048576.
        max_poll_records (int): The maximum number of records returned in a
            single call to ``getmany()``. Defaults to ``None`` (no limit).
        request_timeout_ms (int): Client request timeout in milliseconds.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        auto_offset_reset (str): A policy for resetting offsets on
            OffsetOutOfRange errors: 'earliest' will move to the oldest
            available message, 'latest' will move to the most recent. Any
            other value will raise an exception. Default: 'latest'.
        enable_auto_commit (bool): If true the consumer's offset will be
            periodically committed in the background. Default: True.
        auto_commit_interval_ms (int): milliseconds between automatic
            offset commits, if enable_auto_commit is True. Default: 5000.
        check_crcs (bool): Automatically check the CRC32 of the records
            consumed. This ensures no on-the-wire or on-disk corruption to
            the messages occurred. This check adds some overhead, so it may
            be disabled in cases seeking extreme performance. Default: True
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        partition_assignment_strategy (list): List of objects to use to
            distribute partition ownership amongst consumer instances when
            group management is used. This preference is implicit in the order
            of the strategies in the list. When assignment strategy changes:
            to support a change to the assignment strategy, new versions must
            enable support both for the old assignment strategy and the new
            one. The coordinator will choose the old assignment strategy until
            all members have been updated. Then it will choose the new
            strategy. Default: [RoundRobinPartitionAssignor]
        heartbeat_interval_ms (int): The expected time in milliseconds
            between heartbeats to the consumer coordinator when using
            Kafka's group management feature. Heartbeats are used to ensure
            that the consumer's session stays active and to facilitate
            rebalancing when new consumers join or leave the group. The
            value must be set lower than session_timeout_ms, but typically
            should be set no higher than 1/3 of that value. It can be
            adjusted even lower to control the expected time for normal
            rebalances. Default: 3000
        session_timeout_ms (int): The timeout used to detect failures when
            using Kafka's group management facilities. Default: 30000
        consumer_timeout_ms (int): maximum wait timeout for background fetching
            routine. Mostly defines how fast the system will see rebalance and
            request new data for new partitions. Default: 200
        api_version (str): specify which kafka API version to use.
            AIOKafkaConsumer supports Kafka API versions >=0.9 only.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. Directly passed into asyncio's
            `create_connection`_. For more information see :ref:`ssl_auth`.
            Default: None.
        exclude_internal_topics (bool): Whether records from internal topics
            (such as offsets) should be exposed to the consumer. If set to True
            the only way to receive records from an internal topic is
            subscribing to it. Requires 0.10+ Default: True
        connections_max_idle_ms (int): Close idle connections after the number
            of milliseconds specified by this config. Specifying `None` will
            disable idle checks. Default: 540000 (9 hours).
        isolation_level (str): Controls how to read messages written
            transactionally. If set to *read_committed*,
            ``consumer.getmany()``
            will only return transactional messages which have been committed.
            If set to *read_uncommitted* (the default), ``consumer.getmany()``
            will return all messages, even transactional messages which have
            been aborted.

            Non-transactional messages will be returned unconditionally in
            either mode.

            Messages will always be returned in offset order. Hence, in
            *read_committed* mode, ``consumer.getmany()`` will only return
            messages up to the last stable offset (LSO), which is the one less
            than the offset of the first open transaction. In particular any
            messages appearing after messages belonging to ongoing transactions
            will be withheld until the relevant transaction has been completed.
            As a result, *read_committed* consumers will not be able to read up
            to the high watermark when there are in flight transactions.
            Further, when in *read_committed* the seek_to_end method will
            return the LSO. See method docs below. Default: "read_uncommitted"

    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#newconsumerconfigs

    """

    _closed = None  # Serves as an uninitialized flag for __del__
    _source_traceback = None

    def __init__(self, *topics, loop,
                 bootstrap_servers='localhost',
                 client_id='aiokafka-' + __version__,
                 group_id=None,
                 key_deserializer=None, value_deserializer=None,
                 fetch_max_wait_ms=500,
                 fetch_max_bytes=52428800,
                 fetch_min_bytes=1,
                 max_partition_fetch_bytes=1 * 1024 * 1024,
                 request_timeout_ms=40 * 1000,
                 retry_backoff_ms=100,
                 auto_offset_reset='latest',
                 enable_auto_commit=True,
                 auto_commit_interval_ms=5000,
                 check_crcs=True,
                 metadata_max_age_ms=5 * 60 * 1000,
                 partition_assignment_strategy=(RoundRobinPartitionAssignor,),
                 heartbeat_interval_ms=3000,
                 session_timeout_ms=30000,
                 consumer_timeout_ms=200,
                 max_poll_records=None,
                 ssl_context=None,
                 security_protocol='PLAINTEXT',
                 api_version='auto',
                 exclude_internal_topics=True,
                 connections_max_idle_ms=540000,
                 isolation_level="read_uncommitted"):
        if max_poll_records is not None and (
                not isinstance(max_poll_records, int) or max_poll_records < 1):
            raise ValueError("`max_poll_records` should be positive Integer")

        self._client = AIOKafkaClient(
            loop=loop, bootstrap_servers=bootstrap_servers,
            client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms,
            retry_backoff_ms=retry_backoff_ms,
            api_version=api_version,
            ssl_context=ssl_context,
            security_protocol=security_protocol,
            connections_max_idle_ms=connections_max_idle_ms)

        self._group_id = group_id
        self._heartbeat_interval_ms = heartbeat_interval_ms
        self._session_timeout_ms = session_timeout_ms
        self._retry_backoff_ms = retry_backoff_ms
        self._auto_offset_reset = auto_offset_reset
        self._request_timeout_ms = request_timeout_ms
        self._enable_auto_commit = enable_auto_commit
        self._auto_commit_interval_ms = auto_commit_interval_ms
        self._partition_assignment_strategy = partition_assignment_strategy
        self._key_deserializer = key_deserializer
        self._value_deserializer = value_deserializer
        self._fetch_min_bytes = fetch_min_bytes
        self._fetch_max_bytes = fetch_max_bytes
        self._fetch_max_wait_ms = fetch_max_wait_ms
        self._max_partition_fetch_bytes = max_partition_fetch_bytes
        self._exclude_internal_topics = exclude_internal_topics
        self._max_poll_records = max_poll_records
        self._consumer_timeout = consumer_timeout_ms / 1000
        self._isolation_level = isolation_level
        self._check_crcs = check_crcs
        self._subscription = SubscriptionState(loop=loop)
        self._fetcher = None
        self._coordinator = None
        self._loop = loop

        if loop.get_debug():
            self._source_traceback = traceback.extract_stack(sys._getframe(1))
        self._closed = False

        if topics:
            topics = self._validate_topics(topics)
            self._client.set_topics(topics)
            self._subscription.subscribe(topics=topics)

    if PY_341:
        # Warn if consumer was not closed properly
        # We don't attempt to close the Consumer, as __del__ is synchronous
        def __del__(self, _warnings=warnings):
            if self._closed is False:
                if PY_36:
                    kwargs = {'source': self}
                else:
                    kwargs = {}
                _warnings.warn("Unclosed AIOKafkaConsumer {!r}".format(self),
                               ResourceWarning,
                               **kwargs)
                context = {'consumer': self,
                           'message': 'Unclosed AIOKafkaConsumer'}
                if self._source_traceback is not None:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)

    @asyncio.coroutine
    def start(self):
        """ Connect to Kafka cluster. This will:

            * Load metadata for all cluster nodes and partition allocation
            * Wait for possible topic autocreation
            * Join group if ``group_id`` provided
        """
        assert self._fetcher is None, "Did you call `start` twice?"
        yield from self._client.bootstrap()
        yield from self._wait_topics()

        if self._client.api_version < (0, 9):
            raise ValueError("Unsupported Kafka version: {}".format(
                self._client.api_version))

        if self._isolation_level == "read_committed" and \
                self._client.api_version < (0, 11):
            raise UnsupportedVersionError(
                "`read_committed` isolation_level available only for Brokers "
                "0.11 and above")

        self._fetcher = Fetcher(
            self._client, self._subscription, loop=self._loop,
            key_deserializer=self._key_deserializer,
            value_deserializer=self._value_deserializer,
            fetch_min_bytes=self._fetch_min_bytes,
            fetch_max_bytes=self._fetch_max_bytes,
            fetch_max_wait_ms=self._fetch_max_wait_ms,
            max_partition_fetch_bytes=self._max_partition_fetch_bytes,
            check_crcs=self._check_crcs,
            fetcher_timeout=self._consumer_timeout,
            retry_backoff_ms=self._retry_backoff_ms,
            auto_offset_reset=self._auto_offset_reset,
            isolation_level=self._isolation_level)

        if self._group_id is not None:
            # using group coordinator for automatic partitions assignment
            self._coordinator = GroupCoordinator(
                self._client, self._subscription, loop=self._loop,
                group_id=self._group_id,
                heartbeat_interval_ms=self._heartbeat_interval_ms,
                session_timeout_ms=self._session_timeout_ms,
                retry_backoff_ms=self._retry_backoff_ms,
                enable_auto_commit=self._enable_auto_commit,
                auto_commit_interval_ms=self._auto_commit_interval_ms,
                assignors=self._partition_assignment_strategy,
                exclude_internal_topics=self._exclude_internal_topics)
            if self._subscription.subscription is not None:
                if self._subscription.partitions_auto_assigned():
                    # Either we passed `topics` to constructor or `subscribe`
                    # was called before `start`
                    yield from self._wait_for_data_or_error(
                        self._subscription.wait_for_assignment(),
                        shield=False
                    )
                else:
                    # `assign` was called before `start`. We did not start
                    # this task on that call, as coordinator was yet to be
                    # created
                    self._coordinator.start_commit_offsets_refresh_task(
                        self._subscription.subscription.assignment)
        else:
            # Using a simple assignment coordinator for reassignment on
            # metadata changes
            self._coordinator = NoGroupCoordinator(
                self._client, self._subscription, loop=self._loop,
                exclude_internal_topics=self._exclude_internal_topics)

            if self._subscription.subscription is not None:
                if self._subscription.partitions_auto_assigned():
                    # Either we passed `topics` to constructor or `subscribe`
                    # was called before `start`
                    yield from self._client.force_metadata_update()
                    self._coordinator.assign_all_partitions(check_unknown=True)
                else:
                    self._coordinator.reset_committed()

    @asyncio.coroutine
    def _wait_topics(self):
        if self._subscription.subscription is not None:
            for topic in self._subscription.subscription.topics:
                yield from self._client._wait_on_metadata(topic)

    def _validate_topics(self, topics):
        if not isinstance(topics, (tuple, set, list)):
            raise ValueError("Topics should be list of strings")
        return set(topics)

    def assign(self, partitions):
        """ Manually assign a list of TopicPartitions to this consumer.

        This interface does not support incremental assignment and will
        replace the previous assignment (if there was one).

        Arguments:
            partitions (list of TopicPartition): assignment for this instance.

        Raises:
            IllegalStateError: if consumer has already called subscribe()

        Warning:
            It is not possible to use both manual partition assignment with
            assign() and group assignment with subscribe().

        Note:
            Manual topic assignment through this method does not use the
            consumer's group management functionality. As such, there will be
            **no rebalance operation triggered** when group membership or
            cluster and topic metadata change.
        """
        self._subscription.assign_from_user(partitions)
        self._client.set_topics([tp.topic for tp in partitions])

        # If called before `start` we will delegate this to `start` call
        if self._coordinator is not None:
            if self._group_id is not None:
                # refresh commit positions for all assigned partitions
                assignment = self._subscription.subscription.assignment
                self._coordinator.start_commit_offsets_refresh_task(assignment)
            else:
                self._coordinator.reset_committed()

    def assignment(self):
        """ Get the set of partitions currently assigned to this consumer.

        If partitions were directly assigned using ``assign()``, then this will
        simply return the same partitions that were previously assigned.

        If topics were subscribed using ``subscribe()``, then this will give
        the set of topic partitions currently assigned to the consumer (which
        may be empty if the assignment hasn't happened yet or if the partitions
        are in the process of being reassigned).

        Returns:
            set: {TopicPartition, ...}
        """
        return self._subscription.assigned_partitions()

    @asyncio.coroutine
    def stop(self):
        """ Close the consumer, while waiting for finilizers:

            * Commit last consumed message if autocommit enabled
            * Leave group if used Consumer Groups
        """
        if self._closed:
            return
        log.debug("Closing the KafkaConsumer.")
        self._closed = True
        if self._coordinator:
            yield from self._coordinator.close()
        if self._fetcher:
            yield from self._fetcher.close()
        yield from self._client.close()
        log.debug("The KafkaConsumer has closed.")

    @asyncio.coroutine
    def commit(self, offsets=None):
        """ Commit offsets to Kafka.

        This commits offsets only to Kafka. The offsets committed using this
        API will be used on the first fetch after every rebalance and also on
        startup. As such, if you need to store offsets in anything other than
        Kafka, this API should not be used.

        Currently only supports kafka-topic offset storage (not zookeeper)

        When explicitly passing ``offsets``, use either the offset of the next
        record, or a tuple of offset and metadata::

            tp = TopicPartition(msg.topic, msg.partition)
            metadata = "Some utf-8 metadata"
            # Either
            await consumer.commit({tp: msg.offset + 1})
            # Or position directly
            await consumer.commit({tp: (msg.offset + 1, metadata)})

        .. note:: If you want a `fire and forget` commit, like ``commit_async()``
            in *kafka-python*, just run it in a task. Something like::

                fut = loop.create_task(consumer.commit())
                fut.add_done_callback(on_commit_done)

        Arguments:
            offsets (dict, optional): {TopicPartition: (offset, metadata)} dict
                to commit with the configured ``group_id``. Defaults to current
                consumed offsets for all subscribed partitions.
        Raises:
            IllegalOperation: If used with ``group_id == None``.
            IllegalStateError: If partitions not assigned.
            ValueError: If offsets is of wrong format.
            CommitFailedError: If membership already changed on broker.
            KafkaError: If commit failed on broker side. This could be due to
                invalid offset, too long metadata, authorization failure, etc.

        .. versionchanged:: 0.4.0

            Changed ``AssertionError`` to ``IllegalStateError`` in case of
            unassigned partition.

        .. versionchanged:: 0.4.0

            Will now raise ``CommitFailedError`` in case membership changed,
            as (possibly) this partition is now handled by another consumer.
        """
        if self._group_id is None:
            raise IllegalOperation("Requires group_id")

        subscription = self._subscription.subscription
        if subscription is None:
            raise IllegalStateError("Not subscribed to any topics")
        assignment = subscription.assignment
        if assignment is None:
            raise IllegalStateError("No partitions assigned")

        if offsets is None:
            offsets = assignment.all_consumed_offsets()
        else:
            offsets = commit_structure_validate(offsets)
            for tp in offsets:
                if tp not in assignment.tps:
                    raise IllegalStateError(
                        "Partition {} is not assigned".format(tp))

        yield from self._coordinator.commit_offsets(assignment, offsets)

    @asyncio.coroutine
    def committed(self, partition):
        """ Get the last committed offset for the given partition. (whether the
        commit happened by this process or another).

        This offset will be used as the position for the consumer in the event
        of a failure.

        This call will block while it performs a remote call to get the latest
        committed offset, as those are not cached by the consumer
        (a Transactional Producer can change them without the Consumer's
        knowledge as of Kafka 0.11.0).

        Arguments:
            partition (TopicPartition): the partition to check

        Returns:
            The last committed offset, or None if there was no prior commit.

        Raises:
            IllegalOperation: If used with ``group_id == None``
        """
        if self._group_id is None:
            raise IllegalOperation("Requires group_id")

        commit_map = yield from self._coordinator.fetch_committed_offsets(
            [partition])
        if partition in commit_map:
            committed = commit_map[partition].offset
            if committed == -1:
                committed = None
        else:
            committed = None
        return committed

    @asyncio.coroutine
    def topics(self):
        """ Get all topics the user is authorized to view.

        Returns:
            set: topics
        """
        cluster = yield from self._client.fetch_all_metadata()
        return cluster.topics()

    def partitions_for_topic(self, topic):
        """ Get metadata about the partitions for a given topic.

        This method will return `None` if Consumer does not already have
        metadata for this topic.

        Arguments:
            topic (str): topic to check

        Returns:
            set: partition ids
        """
        return self._client.cluster.partitions_for_topic(topic)

    @asyncio.coroutine
    def position(self, partition):
        """ Get the offset of the *next record* that will be fetched (if a
        record with that offset exists on broker).

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int: offset

        Raises:
            IllegalStateError: partition is not assigned

        .. versionchanged:: 0.4.0

            Changed ``AssertionError`` to ``IllegalStateError`` in case of
            unassigned partition
        """
        while True:
            if not self._subscription.is_assigned(partition):
                raise IllegalStateError(
                    'Partition {} is not assigned'.format(partition))
            assignment = self._subscription.subscription.assignment
            tp_state = assignment.state_value(partition)
            if not tp_state.has_valid_position:
                yield from asyncio.wait(
                    [tp_state.wait_for_position(),
                     assignment.unassign_future],
                    return_when=asyncio.FIRST_COMPLETED, loop=self._loop,
                )
                if not tp_state.has_valid_position:
                    if self._subscription.subscription is None:
                        raise IllegalStateError(
                            'Partition {} is not assigned'.format(partition))
                    if self._subscription.subscription.assignment is None:
                        yield from self._subscription.wait_for_assignment()
                    continue
            return tp_state.position

    def highwater(self, partition):
        """ Last known highwater offset for a partition.

        A highwater offset is the offset that will be assigned to the next
        message that is produced. It may be useful for calculating lag, by
        comparing with the reported position. Note that both position and
        highwater refer to the *next* offset – i.e., highwater offset is one
        greater than the newest available message.

        Highwater offsets are returned as part of ``FetchResponse``, so will
        not be available if messages for this partition were not requested yet.

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int or None: offset if available
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        assignment = self._subscription.subscription.assignment
        return assignment.state_value(partition).highwater

    def last_stable_offset(self, partition):
        """ Returns the Last Stable Offset of a topic. It will be the last
        offset up to which point all transactions were completed. Only
        available in with isolation_level `read_committed`, in
        `read_uncommitted` will always return -1. Will return None for older
        Brokers.

        As with ``highwater()``, it will not be available until some messages
        are consumed.

        Arguments:
            partition (TopicPartition): partition to check

        Returns:
            int or None: offset if available
        """
        assert self._subscription.is_assigned(partition), \
            'Partition is not assigned'
        assignment = self._subscription.subscription.assignment
        return assignment.state_value(partition).lso

    def seek(self, partition, offset):
        """ Manually specify the fetch offset for a TopicPartition.

        Overrides the fetch offsets that the consumer will use on the next
        ``getmany()``/``getone()`` call. If this API is invoked for the same
        partition more than once, the latest offset will be used on the next
        fetch.

        Note:
            You may lose data if this API is arbitrarily used in the middle
            of consumption to reset the fetch offsets. Use it either on
            rebalance listeners or after all pending messages are processed.

        Arguments:
            partition (TopicPartition): partition for seek operation
            offset (int): message offset in partition

        Raises:
            ValueError: if offset is not a non-negative integer
            IllegalStateError: partition is not currently assigned

        .. versionchanged:: 0.4.0

            Changed ``AssertionError`` to ``IllegalStateError`` and
            ``ValueError`` in respective cases.
        """
        if not isinstance(offset, int) or offset < 0:
            raise ValueError("Offset must be a positive integer")
        log.debug("Seeking to offset %s for partition %s", offset, partition)
        self._fetcher.seek_to(partition, offset)

    @asyncio.coroutine
    def seek_to_beginning(self, *partitions):
        """ Seek to the oldest available offset for partitions.

        Arguments:
            *partitions: Optionally provide specific TopicPartitions, otherwise
                default to all assigned partitions.

        Raises:
            IllegalStateError: If any partition is not currently assigned
            TypeError: If partitions are not instances of TopicPartition

        .. versionadded:: 0.3.0

        """
        if not all([isinstance(p, TopicPartition) for p in partitions]):
            raise TypeError('partitions must be TopicPartition instances')

        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            not_assigned = (
                set(partitions) - self._subscription.assigned_partitions()
            )
            if not_assigned:
                raise IllegalStateError(
                    "Partitions {} are not assigned".format(not_assigned))

        for tp in partitions:
            log.debug("Seeking to beginning of partition %s", tp)
        yield from self._fetcher.request_offset_reset(
            partitions, OffsetResetStrategy.EARLIEST)

    @asyncio.coroutine
    def seek_to_end(self, *partitions):
        """Seek to the most recent available offset for partitions.

        Arguments:
            *partitions: Optionally provide specific TopicPartitions, otherwise
                default to all assigned partitions.

        Raises:
            IllegalStateError: If any partition is not currently assigned
            TypeError: If partitions are not instances of TopicPartition

        .. versionadded:: 0.3.0

        """
        if not all([isinstance(p, TopicPartition) for p in partitions]):
            raise TypeError('partitions must be TopicPartition instances')

        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            not_assigned = (
                set(partitions) - self._subscription.assigned_partitions()
            )
            if not_assigned:
                raise IllegalStateError(
                    "Partitions {} are not assigned".format(not_assigned))

        for tp in partitions:
            log.debug("Seeking to end of partition %s", tp)
        yield from self._fetcher.request_offset_reset(
            partitions, OffsetResetStrategy.LATEST)

    @asyncio.coroutine
    def seek_to_committed(self, *partitions):
        """ Seek to the committed offset for partitions.

        Arguments:
            *partitions: Optionally provide specific TopicPartitions, otherwise
                default to all assigned partitions.

        Raises:
            IllegalStateError: If any partition is not currently assigned
            IllegalOperation: If used with ``group_id == None``

        .. versionchanged:: 0.3.0

            Changed ``AssertionError`` to ``IllegalStateError`` in case of
            unassigned partition
        """
        if not all([isinstance(p, TopicPartition) for p in partitions]):
            raise TypeError('partitions must be TopicPartition instances')

        if not partitions:
            partitions = self._subscription.assigned_partitions()
            assert partitions, 'No partitions are currently assigned'
        else:
            not_assigned = (
                set(partitions) - self._subscription.assigned_partitions()
            )
            if not_assigned:
                raise IllegalStateError(
                    "Partitions {} are not assigned".format(not_assigned))

        for tp in partitions:
            offset = yield from self.committed(tp)
            log.debug("Seeking to committed of partition %s %s", tp, offset)
            if offset and offset > 0:
                self._fetcher.seek_to(tp, offset)

    @asyncio.coroutine
    def offsets_for_times(self, timestamps):
        """
        Look up the offsets for the given partitions by timestamp. The returned
        offset for each partition is the earliest offset whose timestamp is
        greater than or equal to the given timestamp in the corresponding
        partition.

        The consumer does not have to be assigned the partitions.

        If the message format version in a partition is before 0.10.0, i.e.
        the messages do not have timestamps, ``None`` will be returned for that
        partition.

        Note:
            This method may block indefinitely if the partition does not exist.

        Arguments:
            timestamps (dict): ``{TopicPartition: int}`` mapping from partition
                to the timestamp to look up. Unit should be milliseconds since
                beginning of the epoch (midnight Jan 1, 1970 (UTC))

        Returns:
            dict: ``{TopicPartition: OffsetAndTimestamp}`` mapping from
            partition to the timestamp and offset of the first message with
            timestamp greater than or equal to the target timestamp.

        Raises:
            ValueError: If the target timestamp is negative
            UnsupportedVersionError: If the broker does not support looking
                up the offsets by timestamp.
            KafkaTimeoutError: If fetch failed in request_timeout_ms

        .. versionadded:: 0.3.0

        """
        if self._client.api_version <= (0, 10, 0):
            raise UnsupportedVersionError(
                "offsets_for_times API not supported for cluster version {}"
                .format(self._client.api_version))
        for tp, ts in timestamps.items():
            timestamps[tp] = int(ts)
            if ts < 0:
                raise ValueError(
                    "The target time for partition {} is {}. The target time "
                    "cannot be negative.".format(tp, ts))
        offsets = yield from self._fetcher.get_offsets_by_times(
            timestamps, self._request_timeout_ms)
        return offsets

    @asyncio.coroutine
    def beginning_offsets(self, partitions):
        """ Get the first offset for the given partitions.

        This method does not change the current consumer position of the
        partitions.

        Note:
            This method may block indefinitely if the partition does not exist.

        Arguments:
            partitions (list): List of TopicPartition instances to fetch
                offsets for.

        Returns:
            dict: ``{TopicPartition: int}`` mapping of partition to earliest
            available offset.

        Raises:
            UnsupportedVersionError: If the broker does not support looking
                up the offsets by timestamp.
            KafkaTimeoutError: If fetch failed in request_timeout_ms.

        .. versionadded:: 0.3.0

        """
        if self._client.api_version <= (0, 10, 0):
            raise UnsupportedVersionError(
                "offsets_for_times API not supported for cluster version {}"
                .format(self._client.api_version))
        offsets = yield from self._fetcher.beginning_offsets(
            partitions, self._request_timeout_ms)
        return offsets

    @asyncio.coroutine
    def end_offsets(self, partitions):
        """ Get the last offset for the given partitions. The last offset of a
        partition is the offset of the upcoming message, i.e. the offset of the
        last available message + 1.

        This method does not change the current consumer position of the
        partitions.

        Note:
            This method may block indefinitely if the partition does not exist.

        Arguments:
            partitions (list): List of TopicPartition instances to fetch
                offsets for.

        Returns:
            dict: ``{TopicPartition: int}`` mapping of partition to last
            available offset + 1.

        Raises:
            UnsupportedVersionError: If the broker does not support looking
                up the offsets by timestamp.
            KafkaTimeoutError: If fetch failed in request_timeout_ms

        .. versionadded:: 0.3.0

        """
        if self._client.api_version <= (0, 10, 0):
            raise UnsupportedVersionError(
                "offsets_for_times API not supported for cluster version {}"
                .format(self._client.api_version))
        offsets = yield from self._fetcher.end_offsets(
            partitions, self._request_timeout_ms)
        return offsets

    def subscribe(self, topics=(), pattern=None, listener=None):
        """ Subscribe to a list of topics, or a topic regex pattern.

        Partitions will be dynamically assigned via a group coordinator.
        Topic subscriptions are not incremental: this list will replace the
        current assignment (if there is one).

        This method is incompatible with ``assign()``.

        Arguments:
           topics (list): List of topics for subscription.
           pattern (str): Pattern to match available topics. You must provide
               either topics or pattern, but not both.
           listener (ConsumerRebalanceListener): Optionally include listener
               callback, which will be called before and after each rebalance
               operation.
               As part of group management, the consumer will keep track of
               the list of consumers that belong to a particular group and
               will trigger a rebalance operation if one of the following
               events occurs:

               * Number of partitions change for any of the subscribed topics
               * Topic is created or deleted
               * An existing member of the consumer group dies
               * A new member is added to the consumer group

               When any of these events are triggered, the provided listener
               will be invoked first to indicate that the consumer's
               assignment has been revoked, and then again when the new
               assignment has been received. Note that this listener will
               immediately override any listener set in a previous call
               to subscribe. It is guaranteed, however, that the partitions
               revoked/assigned through this interface are from topics
               subscribed in this call.
        Raises:
            IllegalStateError: if called after previously calling assign()
            ValueError: if neither topics nor pattern is provided, or both
               are provided
            TypeError: if listener is not a ConsumerRebalanceListener
        """
        if not (topics or pattern):
            raise ValueError(
                "You should provide either `topics` or `pattern`")
        if topics and pattern:
            raise ValueError(
                "You can't provide both `topics` and `pattern`")
        if listener is not None and \
                not isinstance(listener, ConsumerRebalanceListener):
            raise TypeError(
                "listener should be an instance of ConsumerRebalanceListener")
        if pattern is not None:
            try:
                pattern = re.compile(pattern)
            except re.error as err:
                raise ValueError(
                    "{!r} is not a valid pattern: {}".format(pattern, err))
            self._subscription.subscribe_pattern(
                pattern=pattern, listener=listener)
            # NOTE: set_topics will trigger a rebalance, so the coordinator
            # will get the initial subscription shortly by ``metadata_changed``
            # handler.
            self._client.set_topics([])
            log.info("Subscribed to topic pattern: %s", pattern)
        elif topics:
            topics = self._validate_topics(topics)
            self._subscription.subscribe(
                topics=topics, listener=listener)
            self._client.set_topics(self._subscription.subscription.topics)
            log.info("Subscribed to topic(s): %s", topics)

    def subscription(self):
        """ Get the current topic subscription.

        Returns:
            frozenset: {topic, ...}
        """
        return self._subscription.topics

    def unsubscribe(self):
        """ Unsubscribe from all topics and clear all assigned partitions. """
        self._subscription.unsubscribe()
        if self._group_id is not None:
            self._coordinator.maybe_leave_group()
        self._client.set_topics([])
        log.info(
            "Unsubscribed all topics or patterns and assigned partitions")

    def _wait_for_data_or_error(self, coro, *, shield):
        futs = [self._fetcher.error_future]
        coordination_error_fut = self._coordinator.error_future
        if coordination_error_fut is not None:  # group_id is None case
            futs.append(coordination_error_fut)

        return wait_for_reponse_or_error(
            coro, futs, shield=shield, loop=self._loop)

    @asyncio.coroutine
    def getone(self, *partitions):
        """
        Get one message from Kafka.
        If no new messages are prefetched, this method will wait for one.

        Arguments:
            partitions (List[TopicPartition]): Optional list of partitions to
                return from. If no partitions are specified, the returned
                message may come from any partition the consumer is
                subscribed to.

        Returns:
            ConsumerRecord

        Will return instance of

        .. code:: python

            collections.namedtuple(
                "ConsumerRecord",
                ["topic", "partition", "offset", "key", "value"])

        Example usage:


        .. code:: python

            while True:
                message = await consumer.getone()
                topic = message.topic
                partition = message.partition
                # Process message
                print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
        if self._closed:
            raise ConsumerStoppedError()

        msg = yield from self._wait_for_data_or_error(
            self._fetcher.next_record(partitions), shield=False)
        return msg

    @asyncio.coroutine
    def getmany(self, *partitions, timeout_ms=0, max_records=None):
        """Get messages from assigned topics / partitions.

        Prefetched messages are returned in batches by topic-partition.
        If no messages are available in the prefetched buffer, this method
        waits up to `timeout_ms` milliseconds.

        Arguments:
            partitions (List[TopicPartition]): The partitions to fetch
                messages from. If no partitions are specified, all subscribed
                partitions will be used.
            timeout_ms (int, optional): milliseconds spent waiting if
                data is not available in the buffer. If 0, returns immediately
                with any records that are currently available in the buffer,
                or an empty result otherwise. Must not be negative. Default: 0
            max_records (int, optional): maximum number of records to return
                in one call. Defaults to the ``max_poll_records`` setting.
        Returns:
            dict: mapping of TopicPartition to list of records fetched since
                the last call, for the subscribed list of topics and
                partitions

        Example usage:


        .. code:: python

            data = await consumer.getmany()
            for tp, messages in data.items():
                topic = tp.topic
                partition = tp.partition
                for message in messages:
                    # Process message
                    print(message.offset, message.key, message.value)

        """
        assert all(map(lambda k: isinstance(k, TopicPartition), partitions))
        if self._closed:
            raise ConsumerStoppedError()

        if max_records is not None and (
                not isinstance(max_records, int) or max_records < 1):
            raise ValueError("`max_records` must be a positive integer")

        timeout = timeout_ms / 1000
        records = yield from self._wait_for_data_or_error(
            self._fetcher.fetched_records(
                partitions, timeout,
                max_records=max_records or self._max_poll_records),
            shield=False
        )
        return records

    if PY_35:
        def __aiter__(self):
            if self._closed:
                raise ConsumerStoppedError()
            return self

        # Old 3.5 versions require a coroutine
        if not PY_352:
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):
            """Asyncio iterator interface for consumer

            Note:
                TopicAuthorizationFailedError and OffsetOutOfRangeError
                exceptions can be raised in iterator.
                All other KafkaError exceptions will be logged and not raised
            """
            while True:
                try:
                    return (yield from self.getone())
                except ConsumerStoppedError:
                    raise StopAsyncIteration  # noqa: F821
                except (TopicAuthorizationFailedError,
                        OffsetOutOfRangeError,
                        NoOffsetForPartitionError) as err:
                    raise err
                except RecordTooLargeError:
                    log.exception("error in consumer iterator")
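
A minimal usage sketch for the consumer class above, wired together from the
getmany()/commit()/stop() flow documented in its docstrings. The broker
address, topic and group names here are placeholder assumptions, not part of
the excerpt:

import asyncio

from aiokafka import AIOKafkaConsumer

async def consume(loop):
    # 'my_topic', 'my_group' and the broker address are placeholders
    consumer = AIOKafkaConsumer(
        'my_topic', loop=loop,
        bootstrap_servers='localhost:9092',
        group_id='my_group',
        enable_auto_commit=False)
    await consumer.start()
    try:
        while True:
            # wait up to 1 second for prefetched records
            data = await consumer.getmany(timeout_ms=1000)
            for tp, messages in data.items():
                for message in messages:
                    print(tp, message.offset, message.value)
            # commit the positions of all consumed messages
            await consumer.commit()
    finally:
        await consumer.stop()

loop = asyncio.get_event_loop()
loop.run_until_complete(consume(loop))
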
Example #39
from asyncio import coroutine, get_event_loop

def async_run(func):
    ioloop = get_event_loop()
    work = coroutine(func)
    ioloop.run_until_complete(work())
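
For instance (greet is a made-up placeholder function):

def greet():
    print('hello from the event loop')

async_run(greet)  # wraps greet as a coroutine and runs it to completion
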
Example #40
def _ensure_coroutine_function(fn):
    return fn if asyncio.iscoroutinefunction(fn) else asyncio.coroutine(fn)
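
A quick, hypothetical illustration of the helper above: a plain callback is
wrapped so it can be awaited uniformly alongside real coroutine functions
(sync_cb is a made-up name):

import asyncio

def sync_cb(value):
    # plain synchronous callback
    return value * 2

cb = _ensure_coroutine_function(sync_cb)

loop = asyncio.get_event_loop()
assert loop.run_until_complete(cb(21)) == 42
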
Example #41
def decorator(*args, **kwargs):
    loop = asyncio.get_event_loop()
    coro = asyncio.coroutine(f)
    kwargs['loop'] = loop
    future = coro(*args, **kwargs)
    loop.run_until_complete(future)
Example #42
def on_request(self, request, handler, **kwargs):
    if not asyncio.iscoroutinefunction(handler):
        handler = asyncio.coroutine(handler)
    options = {**kwargs, "wait": False}
    self.routers["request"].register(request, (handler, options))
Example #43
def wrapper(testcase, *args, **kwargs):
    coro = asyncio.coroutine(f)
    future = asyncio.wait_for(coro(testcase, *args, **kwargs),
                              timeout=testcase.timeout)
    return testcase.loop.run_until_complete(future)
Example #44
def async_validating_transport(requests, responses):
    sync_transport = validating_transport(requests, responses)
    return mock.Mock(send=asyncio.coroutine(sync_transport.send))
Example #45
def wrapper(*args, **kwargs):
    # Both sync and async support.
    async_function = asyncio.coroutine(function)
    loop = asyncio.get_event_loop()
    loop.run_until_complete(async_function(*args, **kwargs))
Example #46
def cleaner(self, coro):
    """ Function decorator for a cleanup coroutine. """
    if not asyncio.iscoroutinefunction(coro):
        coro = asyncio.coroutine(coro)
    self.add_cleaner(coro)
    return coro
Example #47
    def test_returns_coroutine_with_return_value_being_a_coroutine(self, klass):
        mock = klass()
        coroutine = asyncio.coroutine(lambda: 'ProbeValue')
        mock.return_value = coroutine()

        self.assertEqual('ProbeValue', run_coroutine(mock()))
Example #48
class BodyPartReader(object):
    """Multipart reader for single body part."""

    chunk_size = 8192

    def __init__(self, boundary, headers, content):
        self.headers = headers
        self._boundary = boundary
        self._content = content
        self._at_eof = False
        length = self.headers.get(CONTENT_LENGTH, None)
        self._length = int(length) if length is not None else None
        self._read_bytes = 0
        self._unread = deque()
        self._prev_chunk = None
        self._content_eof = 0

    if PY_35:
        def __aiter__(self):
            return self

        if not PY_352:  # pragma: no cover
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):
            part = yield from self.next()
            if part is None:
                raise StopAsyncIteration  # NOQA
            return part

    @asyncio.coroutine
    def next(self):
        item = yield from self.read()
        if not item:
            return None
        return item

    @asyncio.coroutine
    def read(self, *, decode=False):
        """Reads body part data.

        :param bool decode: Decodes data following the encoding
                            method from the `Content-Encoding` header. If the
                            header is missing, the data remains untouched

        :rtype: bytearray
        """
        if self._at_eof:
            return b''
        data = bytearray()
        if self._length is None:
            while not self._at_eof:
                data.extend((yield from self.readline()))
        else:
            while not self._at_eof:
                data.extend((yield from self.read_chunk(self.chunk_size)))
        if decode:
            return self.decode(data)
        return data

    @asyncio.coroutine
    def read_chunk(self, size=chunk_size):
        """Reads body part content chunk of the specified size.

        :param int size: chunk size

        :rtype: bytearray
        """
        if self._at_eof:
            return b''
        if self._length:
            chunk = yield from self._read_chunk_from_length(size)
        else:
            chunk = yield from self._read_chunk_from_stream(size)

        self._read_bytes += len(chunk)
        if self._read_bytes == self._length:
            self._at_eof = True
        if self._at_eof:
            assert b'\r\n' == (yield from self._content.readline()), \
                'reader did not read all the data or it is malformed'
        return chunk

    @asyncio.coroutine
    def _read_chunk_from_length(self, size):
        """Reads body part content chunk of the specified size.
        The body part must have a `Content-Length` header with a proper value.

        :param int size: chunk size

        :rtype: bytearray
        """
        assert self._length is not None, \
            'Content-Length required for chunked read'
        chunk_size = min(size, self._length - self._read_bytes)
        chunk = yield from self._content.read(chunk_size)
        return chunk

    @asyncio.coroutine
    def _read_chunk_from_stream(self, size):
        """Reads content chunk of body part with unknown length.
        The `Content-Length` header for body part is not necessary.

        :param int size: chunk size

        :rtype: bytearray
        """
        assert size >= len(self._boundary) + 2, \
            'Chunk size must be greater than or equal to boundary length + 2'
        first_chunk = self._prev_chunk is None
        if first_chunk:
            self._prev_chunk = yield from self._content.read(size)

        chunk = yield from self._content.read(size)
        self._content_eof += int(self._content.at_eof())
        assert self._content_eof < 3, "Reading after EOF"
        window = self._prev_chunk + chunk
        sub = b'\r\n' + self._boundary
        if first_chunk:
            idx = window.find(sub)
        else:
            idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
        if idx >= 0:
            # pushing boundary back to content
            self._content.unread_data(window[idx:])
            if size > idx:
                self._prev_chunk = self._prev_chunk[:idx]
            chunk = window[len(self._prev_chunk):idx]
            if not chunk:
                self._at_eof = True
        if 0 < len(chunk) < len(sub) and not self._content_eof:
            self._prev_chunk += chunk
            self._at_eof = False
            return b''
        result = self._prev_chunk
        self._prev_chunk = chunk
        return result

    @asyncio.coroutine
    def readline(self):
        """Reads body part by line by line.

        :rtype: bytearray
        """
        if self._at_eof:
            return b''

        if self._unread:
            line = self._unread.popleft()
        else:
            line = yield from self._content.readline()

        if line.startswith(self._boundary):
            # the very last boundary may not come with \r\n,
            # so apply a single rule to all of them
            sline = line.rstrip(b'\r\n')
            boundary = self._boundary
            last_boundary = self._boundary + b'--'
            # ensure that we read exactly the boundary, not something alike
            if sline == boundary or sline == last_boundary:
                self._at_eof = True
                self._unread.append(line)
                return b''
        else:
            next_line = yield from self._content.readline()
            if next_line.startswith(self._boundary):
                line = line[:-2]  # strip CRLF but only once
            self._unread.append(next_line)

        return line

    @asyncio.coroutine
    def release(self):
        """Like :meth:`read`, but reads all the data to the void.

        :rtype: None
        """
        if self._at_eof:
            return
        if self._length is None:
            while not self._at_eof:
                yield from self.readline()
        else:
            while not self._at_eof:
                yield from self.read_chunk(self.chunk_size)

    @asyncio.coroutine
    def text(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body part contains text data.

        :param str encoding: Custom text encoding. Overrides specified
                             in charset param of `Content-Type` header

        :rtype: str
        """
        data = yield from self.read(decode=True)
        encoding = encoding or self.get_charset(default='latin1')
        return data.decode(encoding)

    @asyncio.coroutine
    def json(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body parts contains JSON data.

        :param str encoding: Custom JSON encoding. Overrides specified
                             in charset param of `Content-Type` header
        """
        data = yield from self.read(decode=True)
        if not data:
            return None
        encoding = encoding or self.get_charset(default='utf-8')
        return json.loads(data.decode(encoding))

    @asyncio.coroutine
    def form(self, *, encoding=None):
        """Like :meth:`read`, but assumes that body parts contains form
        urlencoded data.

        :param str encoding: Custom form encoding. Overrides specified
                             in charset param of `Content-Type` header
        """
        data = yield from self.read(decode=True)
        if not data:
            return None
        encoding = encoding or self.get_charset(default='utf-8')
        return parse_qsl(data.rstrip().decode(encoding), encoding=encoding)

    def at_eof(self):
        """Returns ``True`` if the boundary was reached or
        ``False`` otherwise.

        :rtype: bool
        """
        return self._at_eof

    def decode(self, data):
        """Decodes data according the specified `Content-Encoding`
        or `Content-Transfer-Encoding` headers value.

        Supports ``gzip``, ``deflate`` and ``identity`` encodings for
        `Content-Encoding` header.

        Supports ``base64``, ``quoted-printable``, ``binary`` encodings for
        `Content-Transfer-Encoding` header.

        :param bytearray data: Data to decode.

        :raises: :exc:`RuntimeError` - if encoding is unknown.

        :rtype: bytes
        """
        if CONTENT_TRANSFER_ENCODING in self.headers:
            data = self._decode_content_transfer(data)
        if CONTENT_ENCODING in self.headers:
            return self._decode_content(data)
        return data

    def _decode_content(self, data):
        encoding = self.headers[CONTENT_ENCODING].lower()

        if encoding == 'deflate':
            return zlib.decompress(data, -zlib.MAX_WBITS)
        elif encoding == 'gzip':
            return zlib.decompress(data, 16 + zlib.MAX_WBITS)
        elif encoding == 'identity':
            return data
        else:
            raise RuntimeError('unknown content encoding: {}'.format(encoding))

    def _decode_content_transfer(self, data):
        encoding = self.headers[CONTENT_TRANSFER_ENCODING].lower()

        if encoding == 'base64':
            return base64.b64decode(data)
        elif encoding == 'quoted-printable':
            return binascii.a2b_qp(data)
        elif encoding == 'binary':
            return data
        else:
            raise RuntimeError('unknown content transfer encoding: {}'
                               ''.format(encoding))

    def get_charset(self, default=None):
        """Returns charset parameter from ``Content-Type`` header or default.
        """
        ctype = self.headers.get(CONTENT_TYPE, '')
        *_, params = parse_mimetype(ctype)
        return params.get('charset', default)

    @property
    def filename(self):
        """Returns filename specified in Content-Disposition header or ``None``
        if missed or header is malformed."""
        _, params = parse_content_disposition(
            self.headers.get(CONTENT_DISPOSITION))
        return content_disposition_filename(params)
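
To put the reader above in context, here is a minimal sketch of consuming
multipart body parts in the same yield-from style as the class itself;
request stands for an aiohttp web request and the handler name is an
assumption of this sketch:

import asyncio

@asyncio.coroutine
def handle_upload(request):
    # request.multipart() returns a MultipartReader, which yields
    # BodyPartReader instances one part at a time
    reader = yield from request.multipart()
    while True:
        part = yield from reader.next()
        if part is None:
            break  # final boundary reached, no more parts
        if part.filename:
            # file upload: raw bytes, decoded per Content-Encoding
            data = yield from part.read(decode=True)
            print(part.filename, len(data))
        else:
            # plain form field
            print((yield from part.text()))
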
Example #49
    def test_user_orders_move_valid_data(self):
        async def run_test():

            market = 'okex'

            lib = BitsgapClient(public_key, private_key)

            # get open orders
            result = lib.orders_open(market)

            logging.debug(result)

            self.assertIsNotNone(result)

            self.assertTrue(result['status'] == 'ok')
            self.assertIn('data', result)

            data = result['data']

            self.assertIsNotNone(data)
            self.assertTrue(len(data) > 0)
            self.assertIn('id', data[0])

            id = data[0]['id']
            self.assertIsNotNone(id)

            old_price = data[0]['price']
            self.assertIsNotNone(old_price)
            new_price = float(old_price) * 0.999

            result_move = lib.orders_move(market, id, str(new_price))

            logging.debug(result_move)

            self.assertIn('status', result_move)
            self.assertTrue(result_move['status'] == 'ok')
            self.assertIn('data', result_move)
            self.assertIn('time', result_move)

            data = result_move['data']

            self.assertIsNotNone(data)

            self.assertIn('id', data)
            self.assertIn('price', data)
            self.assertIn('amount_init', data)
            self.assertIn('pair', data)
            self.assertIn('state', data)
            self.assertIn('type', data)
            self.assertIn('side', data)
            self.assertIn('uts', data)
            self.assertIn('amount', data)

            await asyncio.sleep(1)

        event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(event_loop)

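        # run_test is a native ``async def`` coroutine function, so
        # asyncio.coroutine() below returns it unchanged; the wrapper is
        # kept only to mirror the style of the other examples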
        coro = asyncio.coroutine(run_test)
        event_loop.run_until_complete(coro())
        event_loop.close()
Example #50
def add_on_return_callback(self, callback: FunctionOrCoroutine) -> None:
    self._on_return_callbacks.append(asyncio.coroutine(callback))
Example #51
def wrapper(*args, **kwargs):
    coro = asyncio.coroutine(f)
    future = coro(*args, **kwargs)
    testLoop.run_until_complete(future)
Example #52
class WebSocketResponse(StreamResponse):
    def __init__(self,
                 *,
                 timeout=10.0,
                 receive_timeout=None,
                 autoclose=True,
                 autoping=True,
                 heartbeat=None,
                 protocols=()):
        super().__init__(status=101)
        self._protocols = protocols
        self._protocol = None
        self._writer = None
        self._reader = None
        self._closed = False
        self._closing = False
        self._conn_lost = 0
        self._close_code = None
        self._loop = None
        self._req = None
        self._waiting = False
        self._exception = None
        self._timeout = timeout
        self._receive_timeout = receive_timeout
        self._autoclose = autoclose
        self._autoping = autoping
        self._heartbeat = heartbeat
        self._heartbeat_cb = None
        self._pong_response_cb = None
        self._time_service = None

    def _cancel_heartbeat(self):
        if self._pong_response_cb is not None:
            self._pong_response_cb.cancel()
            self._pong_response_cb = None

        if self._heartbeat_cb is not None:
            self._heartbeat_cb.cancel()
            self._heartbeat_cb = None

    def _reset_heartbeat(self):
        self._cancel_heartbeat()

        if self._heartbeat is not None:
            self._heartbeat_cb = self._time_service.call_later(
                self._heartbeat, self._send_heartbeat)

    def _send_heartbeat(self):
        if self._heartbeat is not None and not self._closed:
            self.ping()

            if self._pong_response_cb is not None:
                self._pong_response_cb.cancel()
            self._pong_response_cb = self._time_service.call_later(
                self._heartbeat / 2.0, self._pong_not_received)

    def _pong_not_received(self):
        self._closed = True
        self._close_code = 1006
        self._exception = asyncio.TimeoutError()

        if self._req is not None:
            self._req.transport.close()

    @asyncio.coroutine
    def prepare(self, request):
        # run the pre-check so it is not hidden by do_handshake() exceptions
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl

        parser, protocol, writer = self._pre_start(request)
        resp_impl = yield from super().prepare(request)
        self._post_start(request, parser, protocol, writer)
        return resp_impl

    def _pre_start(self, request):
        try:
            status, headers, parser, writer, protocol = do_handshake(
                request.method, request.headers, request.transport,
                self._protocols)
        except HttpProcessingError as err:
            if err.code == 405:
                raise HTTPMethodNotAllowed(request.method, [hdrs.METH_GET],
                                           body=b'')
            elif err.code == 400:
                raise HTTPBadRequest(text=err.message, headers=err.headers)
            else:  # pragma: no cover
                raise HTTPInternalServerError() from err

        self._time_service = request.time_service
        self._reset_heartbeat()

        if self.status != status:
            self.set_status(status)
        for k, v in headers:
            self.headers[k] = v
        self.force_close()
        return parser, protocol, writer

    def _post_start(self, request, parser, protocol, writer):
        self._req = request
        self._reader = request._reader.set_parser(parser)
        self._writer = writer
        self._protocol = protocol
        self._loop = request.app.loop

    def start(self, request):
        warnings.warn('use .prepare(request) instead', DeprecationWarning)
        # run the pre-check so it is not hidden by do_handshake() exceptions
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl

        parser, protocol, writer = self._pre_start(request)
        resp_impl = super().start(request)
        self._post_start(request, parser, protocol, writer)
        return resp_impl

    def can_prepare(self, request):
        if self._writer is not None:
            raise RuntimeError('Already started')
        try:
            _, _, _, _, protocol = do_handshake(request.method,
                                                request.headers,
                                                request.transport,
                                                self._protocols)
        except HttpProcessingError:
            return WebSocketReady(False, None)
        else:
            return WebSocketReady(True, protocol)

    def can_start(self, request):
        warnings.warn('use .can_prepare(request) instead', DeprecationWarning)
        return self.can_prepare(request)

    @property
    def closed(self):
        return self._closed

    @property
    def close_code(self):
        return self._close_code

    @property
    def protocol(self):
        return self._protocol

    def exception(self):
        return self._exception

    def ping(self, message='b'):
        if self._writer is None:
            raise RuntimeError('Call .prepare() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        self._writer.ping(message)

    def pong(self, message='b'):
        # unsolicited pong
        if self._writer is None:
            raise RuntimeError('Call .prepare() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        self._writer.pong(message)

    def send_str(self, data):
        if self._writer is None:
            raise RuntimeError('Call .prepare() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        if not isinstance(data, str):
            raise TypeError('data argument must be str (%r)' % type(data))
        self._writer.send(data, binary=False)

    def send_bytes(self, data):
        if self._writer is None:
            raise RuntimeError('Call .prepare() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)' % type(data))
        self._writer.send(data, binary=True)

    def send_json(self, data, *, dumps=json.dumps):
        self.send_str(dumps(data))

    @asyncio.coroutine
    def write_eof(self):
        if self._eof_sent:
            return
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")

        yield from self.close()
        self._eof_sent = True

    @asyncio.coroutine
    def close(self, *, code=1000, message=b''):
        if self._writer is None:
            raise RuntimeError('Call .prepare() first')

        if not self._closed:
            self._cancel_heartbeat()
            self._closed = True
            try:
                self._writer.close(code, message)
                yield from self.drain()
            except (asyncio.CancelledError, asyncio.TimeoutError):
                self._close_code = 1006
                raise
            except Exception as exc:
                self._close_code = 1006
                self._exception = exc
                return True

            if self._closing:
                return True

            try:
                with self._time_service.timeout(self._timeout):
                    msg = yield from self._reader.read()
            except asyncio.CancelledError:
                self._close_code = 1006
                raise
            except Exception as exc:
                self._close_code = 1006
                self._exception = exc
                return True

            if msg.type == WSMsgType.CLOSE:
                self._close_code = msg.data
                return True

            self._close_code = 1006
            self._exception = asyncio.TimeoutError()
            return True
        else:
            return False

    @asyncio.coroutine
    def receive(self, timeout=None):
        if self._reader is None:
            raise RuntimeError('Call .prepare() first')
        if self._waiting:
            raise RuntimeError('Concurrent call to receive() is not allowed')

        self._waiting = True
        try:
            while True:
                if self._closed:
                    self._conn_lost += 1
                    if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
                        raise RuntimeError('WebSocket connection is closed.')
                    return CLOSED_MESSAGE

                try:
                    with self._time_service.timeout(timeout
                                                    or self._receive_timeout):
                        msg = yield from self._reader.read()
                        self._reset_heartbeat()
                except (asyncio.CancelledError, asyncio.TimeoutError):
                    raise
                except WebSocketError as exc:
                    self._close_code = exc.code
                    yield from self.close(code=exc.code)
                    return WSMessage(WSMsgType.ERROR, exc, None)
                except ClientDisconnectedError:
                    self._closed = True
                    self._close_code = 1006
                    return WSMessage(WSMsgType.CLOSE, None, None)
                except Exception as exc:
                    self._exception = exc
                    self._closing = True
                    self._close_code = 1006
                    yield from self.close()
                    return WSMessage(WSMsgType.ERROR, exc, None)

                if msg.type == WSMsgType.CLOSE:
                    self._closing = True
                    self._close_code = msg.data
                    if not self._closed and self._autoclose:
                        yield from self.close()
                    return msg
                if msg.type == WSMsgType.PING and self._autoping:
                    self.pong(msg.data)
                elif msg.type == WSMsgType.PONG and self._autoping:
                    continue
                else:
                    return msg
        finally:
            self._waiting = False

    @asyncio.coroutine
    def receive_msg(self):
        warnings.warn(
            'receive_msg() coroutine is deprecated. use receive() instead',
            DeprecationWarning)
        return (yield from self.receive())

    @asyncio.coroutine
    def receive_str(self, *, timeout=None):
        msg = yield from self.receive(timeout)
        if msg.type != WSMsgType.TEXT:
            raise TypeError("Received message {}:{!r} is not str".format(
                msg.type, msg.data))
        return msg.data

    @asyncio.coroutine
    def receive_bytes(self, *, timeout=None):
        msg = yield from self.receive(timeout)
        if msg.type != WSMsgType.BINARY:
            raise TypeError("Received message {}:{!r} is not bytes".format(
                msg.type, msg.data))
        return msg.data

    @asyncio.coroutine
    def receive_json(self, *, loads=json.loads, timeout=None):
        data = yield from self.receive_str(timeout=timeout)
        return loads(data)

    def write(self, data):
        raise RuntimeError("Cannot call .write() for websocket")

    if PY_35:

        def __aiter__(self):
            return self

        if not PY_352:  # pragma: no cover
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):
            msg = yield from self.receive()
            if msg.type == WSMsgType.CLOSE or msg.type == WSMsgType.CLOSED:
                raise StopAsyncIteration  # NOQA
            return msg
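
A minimal server-side echo handler sketch using the methods above; route
registration is omitted, the handler name is illustrative, and web,
WSMsgType, and asyncio are assumed to be imported in the surrounding
aiohttp module context:

@asyncio.coroutine
def websocket_handler(request):
    ws = web.WebSocketResponse()
    yield from ws.prepare(request)
    while True:
        msg = yield from ws.receive()
        if msg.type == WSMsgType.TEXT:
            ws.send_str('echo: ' + msg.data)  # send_str() is synchronous here
        elif msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.ERROR):
            break
    yield from ws.close()
    return ws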
Example #53
0
def wrapper(self, *args, **kw):
    # ``f`` is the test function captured from the enclosing decorator scope.
    coro = asyncio.coroutine(f)
    future = coro(self, *args, **kw)
    self.loop.run_until_complete(future)
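
The fragment above is the inner wrapper of a method decorator for test
classes that expose self.loop; a minimal reconstruction of the enclosing
decorator might look like this (the run_coro name is an assumption, not
taken from the source):

import asyncio
import functools

def run_coro(f):  # hypothetical name for the enclosing decorator
    @functools.wraps(f)
    def wrapper(self, *args, **kw):
        coro = asyncio.coroutine(f)
        future = coro(self, *args, **kw)
        self.loop.run_until_complete(future)
    return wrapper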
Example #54
0
def _(fn):
    # ``name`` and ``COMMANDS`` come from the enclosing registration scope.
    coro = asyncio.coroutine(fn)
    COMMANDS[name] = coro
    return coro
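
A plausible reconstruction of the enclosing decorator factory; the command()
name is an assumption inferred from the fragment:

import asyncio

COMMANDS = {}

def command(name):
    def _(fn):
        # Register a coroutine version of ``fn`` under ``name``.
        coro = asyncio.coroutine(fn)
        COMMANDS[name] = coro
        return coro
    return _

Usage would then be @command('ping') above a plain function, which stores a
coroutine version of it in COMMANDS.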
Example #55
0
class MultipartReader(object):
    """Multipart body reader."""

    #: Response wrapper, used when multipart readers constructs from response.
    response_wrapper_cls = MultipartResponseWrapper
    #: Multipart reader class, used to handle multipart/* body parts.
    #: None points to type(self)
    multipart_reader_cls = None
    #: Body part reader class for non multipart/* content types.
    part_reader_cls = BodyPartReader

    def __init__(self, headers, content):
        self.headers = headers
        self._boundary = ('--' + self._get_boundary()).encode()
        self._content = content
        self._last_part = None
        self._at_eof = False
        self._at_bof = True
        self._unread = []

    if PY_35:
        def __aiter__(self):
            return self

        if not PY_352:  # pragma: no cover
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):
            part = yield from self.next()
            if part is None:
                raise StopAsyncIteration  # NOQA
            return part

    @classmethod
    def from_response(cls, response):
        """Constructs reader instance from HTTP response.

        :param response: :class:`~aiohttp.client.ClientResponse` instance
        """
        obj = cls.response_wrapper_cls(response, cls(response.headers,
                                                     response.content))
        return obj

    def at_eof(self):
        """Returns ``True`` if the final boundary was reached or
        ``False`` otherwise.

        :rtype: bool
        """
        return self._at_eof

    @asyncio.coroutine
    def next(self):
        """Emits the next multipart body part."""
        # So, if we're at BOF, we need to skip till the boundary.
        if self._at_eof:
            return
        yield from self._maybe_release_last_part()
        if self._at_bof:
            yield from self._read_until_first_boundary()
            self._at_bof = False
        else:
            yield from self._read_boundary()
        if self._at_eof:  # we just read the last boundary, nothing to do there
            return
        self._last_part = yield from self.fetch_next_part()
        return self._last_part

    @asyncio.coroutine
    def release(self):
        """Reads all the body parts to the void till the final boundary."""
        while not self._at_eof:
            item = yield from self.next()
            if item is None:
                break
            yield from item.release()

    @asyncio.coroutine
    def fetch_next_part(self):
        """Returns the next body part reader."""
        headers = yield from self._read_headers()
        return self._get_part_reader(headers)

    def _get_part_reader(self, headers):
        """Dispatches the response by the `Content-Type` header, returning
        suitable reader instance.

        :param dict headers: Response headers
        """
        ctype = headers.get(CONTENT_TYPE, '')
        mtype, *_ = parse_mimetype(ctype)
        if mtype == 'multipart':
            if self.multipart_reader_cls is None:
                return type(self)(headers, self._content)
            return self.multipart_reader_cls(headers, self._content)
        else:
            return self.part_reader_cls(self._boundary, headers, self._content)

    def _get_boundary(self):
        mtype, *_, params = parse_mimetype(self.headers[CONTENT_TYPE])

        assert mtype == 'multipart', 'multipart/* content type expected'

        if 'boundary' not in params:
            raise ValueError('boundary missed for Content-Type: %s'
                             % self.headers[CONTENT_TYPE])

        boundary = params['boundary']
        if len(boundary) > 70:
            raise ValueError('boundary %r is too long (70 chars max)'
                             % boundary)

        return boundary

    @asyncio.coroutine
    def _readline(self):
        if self._unread:
            return self._unread.pop()
        return (yield from self._content.readline())

    @asyncio.coroutine
    def _read_until_first_boundary(self):
        while True:
            chunk = yield from self._readline()
            if chunk == b'':
                raise ValueError("Could not find starting boundary %r"
                                 % (self._boundary))
            chunk = chunk.rstrip()
            if chunk == self._boundary:
                return
            elif chunk == self._boundary + b'--':
                self._at_eof = True
                return

    @asyncio.coroutine
    def _read_boundary(self):
        chunk = (yield from self._readline()).rstrip()
        if chunk == self._boundary:
            pass
        elif chunk == self._boundary + b'--':
            self._at_eof = True
        else:
            raise ValueError('Invalid boundary %r, expected %r'
                             % (chunk, self._boundary))

    @asyncio.coroutine
    def _read_headers(self):
        lines = [b'']
        while True:
            chunk = yield from self._content.readline()
            chunk = chunk.strip()
            lines.append(chunk)
            if not chunk:
                break
        parser = HttpParser()
        headers, *_ = parser.parse_headers(lines)
        return headers

    @asyncio.coroutine
    def _maybe_release_last_part(self):
        """Ensures that the last read body part is read completely."""
        if self._last_part is not None:
            if not self._last_part.at_eof():
                yield from self._last_part.release()
            self._unread.extend(self._last_part._unread)
            self._last_part = None
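
A minimal consumption sketch for the reader above, assuming aiohttp's
BodyPartReader API for the parts (part.read() and part.headers); the handler
shape is illustrative:

@asyncio.coroutine
def read_parts(response):
    # Pull body parts one by one until the final boundary is reached.
    reader = MultipartReader(response.headers, response.content)
    while not reader.at_eof():
        part = yield from reader.next()
        if part is None:
            break
        body = yield from part.read()  # BodyPartReader API, per aiohttp
        print(part.headers.get(CONTENT_TYPE), len(body))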
Example #56
0
def coro_wrapper(*args, **kwargs):
    """Return an async context manager wrapper for this coroutine"""
    # ``coro`` and ``AsyncContextManager`` come from the enclosing scope.
    return AsyncContextManager(asyncio.coroutine(coro)(*args, **kwargs))
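
A hedged reconstruction of the enclosing decorator; the async_cm name is an
assumption, and AsyncContextManager is taken to be a wrapper class defined in
the source module:

import asyncio

def async_cm(coro):  # hypothetical name for the enclosing decorator
    def coro_wrapper(*args, **kwargs):
        """Return an async context manager wrapper for this coroutine"""
        return AsyncContextManager(asyncio.coroutine(coro)(*args, **kwargs))
    return coro_wrapper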
Example #57
0
def correct_aiter(func):  # pragma: no cover
    # Requires ``import sys``. On Python < 3.5.2, ``__aiter__`` had to be a
    # coroutine; from 3.5.2 on it must return the iterator directly.
    if sys.version_info >= (3, 5, 2):
        return func
    else:
        return asyncio.coroutine(func)
Example #58
0
def AsyncMock():
    coro = mock.Mock(name="CoroutineResult")
    corofunc = mock.Mock(name="CoroutineFunction",
                         side_effect=asyncio.coroutine(coro))
    corofunc.coro = coro
    return corofunc
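
A short usage sketch, assuming the AsyncMock() helper above; the fetch name
and URL are illustrative:

import asyncio

fetch = AsyncMock()
fetch.coro.return_value = 42

loop = asyncio.get_event_loop()
# Calling the mock yields a coroutine; awaiting it returns coro.return_value.
result = loop.run_until_complete(fetch('http://example.com'))
assert result == 42
fetch.assert_called_once_with('http://example.com')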
Example #59
0
async def test_light(hass, config_entry, zha_gateway, monkeypatch):
    """Test zha light platform."""
    from zigpy.zcl.clusters.general import OnOff, LevelControl, Basic
    from zigpy.zcl.foundation import Status
    from zigpy.profiles.zha import DeviceType

    # create zigpy devices
    zigpy_device_on_off = await async_init_zigpy_device(
        hass, [OnOff.cluster_id, Basic.cluster_id], [],
        DeviceType.ON_OFF_LIGHT, zha_gateway)

    zigpy_device_level = await async_init_zigpy_device(
        hass, [OnOff.cluster_id, LevelControl.cluster_id, Basic.cluster_id],
        [],
        DeviceType.ON_OFF_LIGHT,
        zha_gateway,
        ieee="00:0d:6f:11:0a:90:69:e7",
        manufacturer="FakeLevelManufacturer",
        model="FakeLevelModel")

    # load up light domain
    await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
    await hass.async_block_till_done()

    # on off light
    on_off_device_on_off_cluster = zigpy_device_on_off.endpoints.get(1).on_off
    on_off_entity_id = make_entity_id(DOMAIN,
                                      zigpy_device_on_off,
                                      on_off_device_on_off_cluster,
                                      use_suffix=False)
    on_off_zha_device = zha_gateway.get_device(str(zigpy_device_on_off.ieee))

    # dimmable light
    level_device_on_off_cluster = zigpy_device_level.endpoints.get(1).on_off
    level_device_level_cluster = zigpy_device_level.endpoints.get(1).level
    on_off_mock = MagicMock(side_effect=asyncio.coroutine(
        MagicMock(return_value=(sentinel.data, Status.SUCCESS))))
    level_mock = MagicMock(side_effect=asyncio.coroutine(
        MagicMock(return_value=(sentinel.data, Status.SUCCESS))))
    monkeypatch.setattr(level_device_on_off_cluster, 'request', on_off_mock)
    monkeypatch.setattr(level_device_level_cluster, 'request', level_mock)
    level_entity_id = make_entity_id(DOMAIN,
                                     zigpy_device_level,
                                     level_device_on_off_cluster,
                                     use_suffix=False)
    level_zha_device = zha_gateway.get_device(str(zigpy_device_level.ieee))

    # test that the lights were created and that they are unavailable
    assert hass.states.get(on_off_entity_id).state == STATE_UNAVAILABLE
    assert hass.states.get(level_entity_id).state == STATE_UNAVAILABLE

    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, zha_gateway,
                               [on_off_zha_device, level_zha_device])

    # test that the lights were created and are off
    assert hass.states.get(on_off_entity_id).state == STATE_OFF
    assert hass.states.get(level_entity_id).state == STATE_OFF

    # test turning the lights on and off from the light
    await async_test_on_off_from_light(hass, on_off_device_on_off_cluster,
                                       on_off_entity_id)

    await async_test_on_off_from_light(hass, level_device_on_off_cluster,
                                       level_entity_id)

    # test turning the lights on and off from the HA
    await async_test_on_off_from_hass(hass, on_off_device_on_off_cluster,
                                      on_off_entity_id)

    await async_test_level_on_off_from_hass(hass, level_device_on_off_cluster,
                                            level_device_level_cluster,
                                            level_entity_id)

    # test turning the lights on and off from the light
    await async_test_on_from_light(hass, level_device_on_off_cluster,
                                   level_entity_id)

    # test getting a brightness change from the network
    await async_test_dimmer_from_light(hass, level_device_level_cluster,
                                       level_entity_id, 150, STATE_ON)

    # test adding a new light to the network and HA
    await async_test_device_join(hass,
                                 zha_gateway,
                                 OnOff.cluster_id,
                                 DOMAIN,
                                 device_type=DeviceType.ON_OFF_LIGHT)
Example #60
0
def _run_test(*args, **kwargs):
    # ``func`` is the test function captured from the enclosing decorator.
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    c = asyncio.coroutine(func)
    event_loop.run_until_complete(c(*args, **kwargs))
    event_loop.close()
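
A minimal reconstruction of the enclosing decorator; the async_test name is
an assumption:

import asyncio
import functools

def async_test(func):  # hypothetical name for the enclosing decorator
    @functools.wraps(func)
    def _run_test(*args, **kwargs):
        # Run the wrapped coroutine on a fresh event loop, then dispose of it.
        event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(event_loop)
        c = asyncio.coroutine(func)
        event_loop.run_until_complete(c(*args, **kwargs))
        event_loop.close()
    return _run_test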