Example #1
File: test_doc.py Project: hhru/frontik
    def test_future_list(self):
        d = Doc('a')
        f = Future()
        f.set_result([etree.Comment('ccc'), etree.Element('bbb')])
        d.put(f)

        self.assertXmlEqual(d.to_etree_element(), """<?xml version='1.0'?>\n<a><!--ccc--><bbb/></a>""")
Example #2
    def test_looking_for_driver_no_drivers(self):
        user = {
            'chat_id': 0,
            'current_location': [0., 0.]
        }

        drivers = []
        future_get_drivers = Future()
        future_get_drivers.set_result(drivers)
        self.users.get_drivers_within_distance = mock.MagicMock(
            return_value=future_get_drivers
        )

        yield self.stage.run(user, {})

        self.stage.sender.assert_has_calls([
            mock.call({
                'chat_id': 0,
                'text': 'looking for a driver'
            }),
            mock.call({
                'chat_id': 0,
                'text': 'no available drivers found'
            })
        ])
        self.assertEqual(2, self.stage.sender.call_count)
        self.assertEqual(None, user['proposed_driver'])
Example #3
File: locks.py Project: rgbkrk/tornado
    def acquire(
        self, timeout: Optional[Union[float, datetime.timedelta]] = None
    ) -> "Future[_ReleasingContextManager]":
        """Decrement the counter. Returns a Future.

        Block if the counter is zero and wait for a `.release`. The Future
        raises `.TimeoutError` after the deadline.
        """
        waiter = Future()  # type: Future[_ReleasingContextManager]
        if self._value > 0:
            self._value -= 1
            waiter.set_result(_ReleasingContextManager(self))
        else:
            self._waiters.append(waiter)
            if timeout:

                def on_timeout() -> None:
                    if not waiter.done():
                        waiter.set_exception(gen.TimeoutError())
                    self._garbage_collect()

                io_loop = ioloop.IOLoop.current()
                timeout_handle = io_loop.add_timeout(timeout, on_timeout)
                waiter.add_done_callback(
                    lambda _: io_loop.remove_timeout(timeout_handle)
                )
        return waiter
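A minimal usage sketch for the method above, assuming it belongs to tornado.locks.Semaphore and an IOLoop is available; the worker names and timings are illustrative only:

import datetime

from tornado import gen, ioloop, locks

sem = locks.Semaphore(2)

async def worker(n):
    # Wait up to one second for a slot; gen.TimeoutError is raised otherwise.
    with await sem.acquire(datetime.timedelta(seconds=1)):
        await gen.sleep(0.1)
        print("worker %d done" % n)

ioloop.IOLoop.current().run_sync(lambda: gen.multi([worker(i) for i in range(4)]))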
Example #4
 def put_task(self, inputs, callback=None):
     """ return a Future of output."""
     f = Future()
     if callback is not None:
         f.add_done_callback(callback)
     self.input_queue.put((inputs, f))
     return f
Example #5
    def get_tweets(username):
        """Helper function to fetch 200 tweets for a user with @username."""
        result_future = Future()
        TWITTER_URL = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
        '''
        curl --get 'https://api.twitter.com/1.1/statuses/user_timeline.json' --data 'count=200&screen_name=twitterapi' --header 'Authorization: OAuth oauth_consumer_key="BlXj0VRgkpUOrN3b6vTyJu8YB", oauth_nonce="9cb4b1aaa1fb1d79e0fbd9bc8b33f82a", oauth_signature="SrJxsOCzOTnudKQMr4nMQ0gDuRk%3D", oauth_signature_method="HMAC-SHA1", oauth_timestamp="1456006969", oauth_token="701166849883373568-bqVfk8vajGxWIlKDe94CRjMJtBvwdQQ", oauth_version="1.0"' --verbose
        '''
        auth = OAuth1('BlXj0VRgkpUOrN3b6vTyJu8YB', 'qzkhGeWIYVXod9umMuinHF2OFmJxiucQspX5JsA7aH8xs5t4DT',
                      '701166849883373568-bqVfk8vajGxWIlKDe94CRjMJtBvwdQQ', 'y3gx0F5fLyIQQFNDev8JtpPKpEUmyy3mMibxCcTK2kbZZ')

        data = {'count': 200,
                'screen_name': username}

        r = requests.get(url=TWITTER_URL, params=data, auth=auth)
        data = r.json()

        if 'errors' in data:
            raise Exception(data['errors'])
        res = []
        for item in data:
            if 'retweeted_status' not in item.keys():
                res.append(item)
        result_future.set_result(res)
        return result_future
Example #6
 def test_completes_before_timeout(self):
     future = Future()
     self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
                              lambda: future.set_result('asdf'))
     result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                     future, io_loop=self.io_loop)
     self.assertEqual(result, 'asdf')
Example #7
    def get(self, timeout: Optional[Union[float, datetime.timedelta]] = None) -> Awaitable[_T]:
        """Remove and return an item from the queue.

        Returns an awaitable which resolves once an item is available, or raises
        `tornado.util.TimeoutError` after a timeout.

        ``timeout`` may be a number denoting a time (on the same
        scale as `tornado.ioloop.IOLoop.time`, normally `time.time`), or a
        `datetime.timedelta` object for a deadline relative to the
        current time.

        .. note::

           The ``timeout`` argument of this method differs from that
           of the standard library's `queue.Queue.get`. That method
           interprets numeric values as relative timeouts; this one
           interprets them as absolute deadlines and requires
           ``timedelta`` objects for relative timeouts (consistent
           with other timeouts in Tornado).

        """
        future = Future()  # type: Future[_T]
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future
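A short usage sketch of both timeout forms described in the docstring above, assuming tornado.queues.Queue and tornado.util.TimeoutError:

from datetime import timedelta

from tornado.ioloop import IOLoop
from tornado.queues import Queue
from tornado.util import TimeoutError

async def consume():
    q = Queue()
    try:
        # Relative timeout: a timedelta is measured from now.
        await q.get(timeout=timedelta(seconds=0.1))
    except TimeoutError:
        print("queue stayed empty for 100ms")
    # Absolute deadline: a number is compared against IOLoop.time().
    q.put_nowait("item")
    print(await q.get(timeout=IOLoop.current().time() + 1))

IOLoop.current().run_sync(consume)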
Example #8
def async_fetch_future(url):
    http_client = AsyncHTTPClient()
    my_future = Future()
    fetch_future = http_client.fetch(url)
    fetch_future.add_done_callback(lambda f: my_future.set_result(f.result()))

    return my_future
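Note that the lambda above calls f.result() inside the done callback, so an exception from the fetch would escape into the IOLoop rather than resolving my_future. A sketch of the same idea written with tornado.concurrent.chain_future, which copies the result or the exception:

from tornado.concurrent import Future, chain_future
from tornado.httpclient import AsyncHTTPClient

def async_fetch_future_chained(url):
    http_client = AsyncHTTPClient()
    my_future = Future()
    # chain_future copies the first future's result *or* exception onto
    # the second future when the fetch completes.
    chain_future(http_client.fetch(url), my_future)
    return my_future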
Example #9
 def resolve(self, host, port, family=0):
     if is_valid_ip(host):
         addresses = [host]
     else:
         # gethostbyname doesn't take callback as a kwarg
         fut = Future()
         self.channel.gethostbyname(host, family,
                                    lambda result, error: fut.set_result((result, error)))
         result, error = yield fut
         if error:
             raise IOError('C-Ares returned error %s: %s while resolving %s' %
                           (error, pycares.errno.strerror(error), host))
         addresses = result.addresses
     addrinfo = []
     for address in addresses:
         if '.' in address:
             address_family = socket.AF_INET
         elif ':' in address:
             address_family = socket.AF_INET6
         else:
             address_family = socket.AF_UNSPEC
         if family != socket.AF_UNSPEC and family != address_family:
             raise IOError('Requested socket family %d but got %d' %
                           (family, address_family))
         addrinfo.append((address_family, (address, port)))
     raise gen.Return(addrinfo)
Example #10
File: gen_test.py Project: 0xkag/tornado
 def test_fails_before_timeout(self):
     future = Future()
     self.io_loop.add_timeout(
         datetime.timedelta(seconds=0.1),
         lambda: future.set_exception(ZeroDivisionError()))
     with self.assertRaises(ZeroDivisionError):
         yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
Example #11
 def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                    source_port=None):
     # Always connect in plaintext; we'll convert to ssl if necessary
     # after one connection has completed.
     source_port_bind = source_port if isinstance(source_port, int) else 0
     source_ip_bind = source_ip
     if source_port_bind and not source_ip:
         # The user requested a specific port but no source IP; bind to the
         # loopback address that matches the requested address family:
         # - 127.0.0.1 for IPv4
         # - ::1 for IPv6
         source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
     socket_obj = socket.socket(af)
     set_close_exec(socket_obj.fileno())
     if source_port_bind or source_ip_bind:
         # The user requested binding to a specific IP and/or port.
         try:
             socket_obj.bind((source_ip_bind, source_port_bind))
         except socket.error:
             socket_obj.close()
             # Fail loudly if unable to use the IP/port.
             raise
     try:
         stream = IOStream(socket_obj,
                           max_buffer_size=max_buffer_size)
     except socket.error as e:
         fu = Future()
         fu.set_exception(e)
         return fu
     else:
         return stream, stream.connect(addr)
Example #12
    def test_future_etree_element(self):
        d = Doc('a')
        f = Future()
        f.set_result(etree.Element('b'))
        d.put(f)

        self.assertXmlEqual(d.to_etree_element(), """<?xml version='1.0' encoding='utf-8'?>\n<a><b/></a>""")
Example #13
File: client.py Project: ei-grad/toredis
    def send_message(self, args, callback=None):

        command = args[0]

        if 'SUBSCRIBE' in command:
            raise NotImplementedError('Not yet.')

        # Do not allow commands that affect the execution of other commands
        # to be used on a shared connection.
        if command in ('WATCH', 'MULTI'):
            if self.is_shared():
                raise Exception('Command %s is not allowed while connection '
                                'is shared!' % command)
            if command == 'WATCH':
                self._watch.add(args[1])
            if command == 'MULTI':
                self._multi = True

        # Monitor the transaction state so the connection is unlocked correctly.
        if command in ('EXEC', 'DISCARD', 'UNWATCH'):
            if command in ('EXEC', 'DISCARD'):
                self._multi = False
            self._watch.clear()

        self.stream.write(self.format_message(args))

        future = Future()

        if callback is not None:
            future.add_done_callback(stack_context.wrap(callback))

        self.callbacks.append(future.set_result)

        return future
Example #14
 def exchange_declare(self, exchange, exchange_type='direct', passive=False, durable=False,
                      auto_delete=False, internal=False, nowait=False, arguments=None, type=None):
     f = Future()
     self.channel.exchange_declare(lambda *a: f.set_result(a), exchange=exchange, exchange_type=exchange_type,
                                   passive=passive, durable=durable, auto_delete=auto_delete,
                                   internal=internal, nowait=nowait, arguments=arguments, type=type)
     return f
Example #15
 def queue_unbind(self, queue='', exchange=None, routing_key=None, arguments=None):
     f = Future()
     self.channel.queue_unbind(
         callback=lambda *a: f.set_result(a), queue=queue,
         exchange=exchange, routing_key=routing_key, arguments=arguments
     )
     return f
Example #16
 def test_completes_before_timeout(self):
     future = Future()  # type: Future[str]
     self.io_loop.add_timeout(
         datetime.timedelta(seconds=0.1), lambda: future.set_result("asdf")
     )
     result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
     self.assertEqual(result, "asdf")
Example #17
def wechat_api_mock(client, request, *args, **kwargs):

    url = urlparse(request.url)
    path = url.path.replace('/cgi-bin/', '').replace('/', '_')
    if path.startswith('_'):
        path = path[1:]
    res_file = os.path.join(_FIXTURE_PATH, '%s.json' % path)
    content = {
        'errcode': 99999,
        'errmsg': 'can not find fixture %s' % res_file,
    }
    headers = {
        'Content-Type': 'application/json'
    }
    try:
        with open(res_file, 'rb') as f:
            content = f.read().decode('utf-8')
    except (IOError, ValueError) as e:
        content['errmsg'] = 'Loads fixture {0} failed, error: {1}'.format(
            res_file,
            e
        )
        content = json.dumps(content)

    buffer = StringIO(content)
    resp = HTTPResponse(
        request,
        200,
        headers=headers,
        buffer=buffer,
    )
    future = Future()
    future.set_result(resp)
    return future
Example #18
 def acquire(self, pool_need_log=False):
     """Occupy free connection"""
     future = Future()
     while True:
         if self.free:
             conn = self.free.pop()
             if conn.valid:
                 self.busy.add(conn)
             else:
                 self.dead.add(conn)
                 continue
             future.set_result(conn)
             conn.connection_need_log = pool_need_log
             log.debug("Acquired free connection %s", conn.fileno)
             return future
         elif self.busy:
             log.debug("No free connections, and some are busy - put in waiting queue")
             self.waiting_queue.appendleft(future)
             return future
         elif self.pending:
             log.debug("No free connections, but some are pending - put in waiting queue")
             self.waiting_queue.appendleft(future)
             return future
         else:
             log.debug("All connections are dead")
             return None
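The matching release path is not shown in this example. A hypothetical sketch of what it might look like, assuming the same free/busy/waiting_queue attributes (this helper is not part of the original code):

 def release(self, conn):
     """Hypothetical counterpart to acquire(): hand the connection to the
     oldest waiter if one is queued, otherwise return it to the free set."""
     self.busy.discard(conn)
     if self.waiting_queue:
         # acquire() uses appendleft(), so the oldest waiter sits on the right.
         waiter = self.waiting_queue.pop()
         self.busy.add(conn)
         waiter.set_result(conn)
     else:
         self.free.add(conn)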
Example #19
    def test_moment(self):
        calls = []

        @gen.coroutine
        def f(name, yieldable):
            for i in range(5):
                calls.append(name)
                yield yieldable

        # First, confirm the behavior without moment: each coroutine
        # monopolizes the event loop until it finishes.
        immediate = Future()  # type: Future[None]
        immediate.set_result(None)
        yield [f("a", immediate), f("b", immediate)]
        self.assertEqual("".join(calls), "aaaaabbbbb")

        # With moment, they take turns.
        calls = []
        yield [f("a", gen.moment), f("b", gen.moment)]
        self.assertEqual("".join(calls), "ababababab")
        self.finished = True

        calls = []
        yield [f("a", gen.moment), f("b", immediate)]
        self.assertEqual("".join(calls), "abbbbbaaaa")
Example #20
    def test_rank_calls_sort_and_returns_output(self):
        # Set up the response from the summarum endpoint.
        expected_result = [
            ('a', 'b', 10.0),
            ('c', 'd', 1.0)
        ]
        future = Future()
        future.set_result(expected_result)
        self.endpoint.fetch_and_parse = Mock(return_value=future)
        
        # Set up the return value for the sort call.
        expected_ranked_facts = {
            'predicate': {},
            'objects': []
        }
        self.ranking_service.sort = Mock(return_value=expected_ranked_facts)
        
        # Call the function under test.
        facts = {}
        ranked_facts = yield self.ranking_service.rank(facts)

        # Check that sort was called...
        self.ranking_service.sort.assert_called_once_with(facts)
        # ...and that rank returns the output from sort.
        self.assertEqual(ranked_facts, expected_ranked_facts)
Example #21
def maybe_future(x):
    if is_future(x):
        return x
    else:
        fut = Future()
        fut.set_result(x)
        return fut
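Usage sketch: plain values are wrapped in an already-resolved Future, while existing futures pass through unchanged (assuming Future and is_future come from tornado.concurrent, as in pre-5.0 Tornado):

from tornado.concurrent import Future

fut = maybe_future(42)
assert fut.result() == 42

other = Future()
assert maybe_future(other) is other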
Example #22
 def update_directories(self, update_dir_list):
     res_future = Future()
     res = {}
     for dirpath in update_dir_list:
         dir_list = []
         file_list = []
         try:
             for i in common.get_dir_contents(self.current_user, dirpath):
                 if i[0].startswith('.'):
                     continue
                 if i[2]:
                     dir_list.append(tuple(list(i) + [i[0].lower()]))
                 else:
                     file_list.append(tuple(list(i) + [i[0].lower()]))
             dir_list = sorted(dir_list, key=operator.itemgetter(3))
             file_list = sorted(file_list, key=operator.itemgetter(3))
             res.update({dirpath: dir_list + file_list})
         except common.MissingFileError:
             continue
     res_future.set_result(res)
     return res_future
Example #23
File: srtr.py Project: lsc36/srtr
 def wait(self, position):
     future = Future()
     if position != self.count():
         future.set_result(dict(position=self.count(), last_word=self.last()))
     else:
         self.waiters.add(future)
     return future
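The notify side of this long-poll pattern is not shown. A hypothetical sketch of how the parked futures might be resolved when a new word arrives, assuming self.waiters is the set populated above:

 def notify_all(self):
     """Hypothetical counterpart to wait(): resolve every parked long-poll."""
     state = dict(position=self.count(), last_word=self.last())
     waiters, self.waiters = self.waiters, set()
     for future in waiters:
         future.set_result(state)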
Example #24
    def fetch(self, request, callback=None, raise_error=True, **kwargs):
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)

        key = self.cache.create_key(request)

        # Check and return future if there is a pending request
        pending = self.pending_requests.get(key)
        if pending:
            return pending

        response = self.cache.get_response_and_time(key)
        if response:
            response.cached = True
            if callback:
                self.io_loop.add_callback(callback, response)
            future = Future()
            future.set_result(response)
            return future

        future = orig_fetch(self, request, callback, raise_error, **kwargs)

        self.pending_requests[key] = future

        def cache_response(future):
            exc = future.exception()
            if exc is None:
                self.cache.save_response(key, future.result())

        future.add_done_callback(cache_response)
        return future
Example #25
 def wait_for(self, response):
     future = Future()
     response.callback = lambda resp: future.set_result(resp.response)
     if self.running:
         return future
     else:
         return self.run(future)
Example #26
 def write_headers(self, start_line, headers, chunk=None, callback=None):
     """Implements `.HTTPConnection.write_headers`."""
     if self.is_client:
         self._request_start_line = start_line
         # Client requests with a non-empty body must have either a
         # Content-Length or a Transfer-Encoding.
         self._chunking_output = (
             start_line.method in ('POST', 'PUT', 'PATCH') and
             'Content-Length' not in headers and
             'Transfer-Encoding' not in headers)
     else:
         self._response_start_line = start_line
         self._chunking_output = (
             # TODO: should this use
             # self._request_start_line.version or
             # start_line.version?
             self._request_start_line.version == 'HTTP/1.1' and
             # 304 responses have no body (not even a zero-length body), and so
             # should not have either Content-Length or Transfer-Encoding
             # headers.
             start_line.code != 304 and
             # No need to chunk the output if a Content-Length is specified.
             'Content-Length' not in headers and
             # Applications are discouraged from touching Transfer-Encoding,
             # but if they do, leave it alone.
             'Transfer-Encoding' not in headers)
         # If a 1.0 client asked for keep-alive, add the header.
         if (self._request_start_line.version == 'HTTP/1.0' and
             (self._request_headers.get('Connection', '').lower()
              == 'keep-alive')):
             headers['Connection'] = 'Keep-Alive'
     if self._chunking_output:
         headers['Transfer-Encoding'] = 'chunked'
     if (not self.is_client and
         (self._request_start_line.method == 'HEAD' or
          start_line.code == 304)):
         self._expected_content_remaining = 0
     elif 'Content-Length' in headers:
         self._expected_content_remaining = int(headers['Content-Length'])
     else:
         self._expected_content_remaining = None
     lines = [utf8("%s %s %s" % start_line)]
     lines.extend([utf8(n) + b": " + utf8(v) for n, v in headers.get_all()])
     for line in lines:
         if b'\n' in line:
             raise ValueError('Newline in header: ' + repr(line))
     if self.stream.closed():
         self._write_future = Future()
         self._write_future.set_exception(iostream.StreamClosedError())
     else:
         if callback is not None:
             self._write_callback = stack_context.wrap(callback)
         else:
             self._write_future = Future()
         data = b"\r\n".join(lines) + b"\r\n\r\n"
         if chunk:
             data += self._format_chunk(chunk)
         self._pending_write = self.stream.write(data)
         self._pending_write.add_done_callback(self._on_write_complete)
     return self._write_future
Example #27
File: rpc.py Project: ericsunset/totoro
    def _consume(self, task_id, callback, wait_timeout):
        conn = yield self._connection_pool.get_connection()
        timeout = None
        consumer_tag = generate_consumer_tag()
        consume_future = Future()

        def _basic_cancel():
            conn.basic_cancel(consumer_tag=consumer_tag)
            consume_future.set_result(None)

        if wait_timeout:
            def _on_timeout():
                _basic_cancel()
                callback(WaitForResultTimeoutError(wait_timeout))
            timeout = self.io_loop.add_timeout(timedelta(milliseconds=wait_timeout), _on_timeout)

        try:
            def _on_result(reply):
                if timeout:
                    self.io_loop.remove_timeout(timeout)
                _basic_cancel()
                callback(reply)

            name = self.backend.binding.routing_key
            queue_declare_future = Future()
            conn.queue_declare(lambda method_frame: queue_declare_future.set_result(None),
                               queue=name,
                               durable=self.backend.persistent)
            yield queue_declare_future
            conn.basic_consume(
                consumer_callback=lambda channel, deliver, properties, reply: _on_result(reply),
                queue=name, consumer_tag=consumer_tag, no_ack=True)
        finally:
            self._connection_pool.put_connection(conn)
            yield consume_future
Example #28
    def write(self, data):
        assert isinstance(data, bytes)
        if self._closed:
            raise StreamClosedError(real_error=self.error)

        if not data:
            if self._write_future:
                return self._write_future
            future = Future()
            future.set_result(None)
            return future

        if self._write_buffer_size:
            self._write_buffer += data
        else:
            self._write_buffer = bytearray(data)
        self._write_buffer_size += len(data)
        future = self._write_future = Future()

        if not self._connecting:
            self._handle_write()
            if self._write_buffer_size:
                if not self._state & self.io_loop.WRITE:
                    self._state = self._state | self.io_loop.WRITE
                    self.io_loop.update_handler(self.fileno(), self._state)

        return future
Example #29
 def apply(callback) -> 'promise.Promise':
     f = Future()
     try:
         f.set_result(callback())
     except BaseException as e:
         f.set_exception(e)
     return Promise(f)
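Usage sketch: apply() runs the callback immediately and captures either its return value or its exception in the wrapped future, so calling it never raises:

ok = apply(lambda: 2 + 2)    # wrapped future holds the result 4
bad = apply(lambda: 1 / 0)   # wrapped future holds the ZeroDivisionError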
Example #30
        def wrapper(*args, **kargs):
            future = Future()
            future.set_result(self._response)

            with patch.object(AsyncHTTPClient, "fetch", return_value=future):
                with patch.object(Client, "fetch", return_value=future):
                    yield coroutine(*args, **kargs)
Example #31
 def readFrame(self):
     """Empty read frame that is never ready"""
     return Future()
Example #32
 def _extend(self, res):
     Cursor._extend(self, res)
     self.new_response.set_result(True)
     self.new_response = Future()
Example #33
 def __init__(self, *args, **kwargs):
     Cursor.__init__(self, *args, **kwargs)
     self.new_response = Future()
Example #34
class TestIOStreamStartTLS(AsyncTestCase):
    def setUp(self):
        try:
            super(TestIOStreamStartTLS, self).setUp()
            self.listener, self.port = bind_unused_port()
            self.server_stream = None
            self.server_accepted = Future()
            netutil.add_accept_handler(self.listener, self.accept)
            self.client_stream = IOStream(socket.socket())
            self.io_loop.add_future(self.client_stream.connect(
                ('127.0.0.1', self.port)), self.stop)
            self.wait()
            self.io_loop.add_future(self.server_accepted, self.stop)
            self.wait()
        except Exception as e:
            print(e)
            raise

    def tearDown(self):
        if self.server_stream is not None:
            self.server_stream.close()
        if self.client_stream is not None:
            self.client_stream.close()
        self.listener.close()
        super(TestIOStreamStartTLS, self).tearDown()

    def accept(self, connection, address):
        if self.server_stream is not None:
            self.fail("should only get one connection")
        self.server_stream = IOStream(connection)
        self.server_accepted.set_result(None)

    @gen.coroutine
    def client_send_line(self, line):
        self.client_stream.write(line)
        recv_line = yield self.server_stream.read_until(b"\r\n")
        self.assertEqual(line, recv_line)

    @gen.coroutine
    def server_send_line(self, line):
        self.server_stream.write(line)
        recv_line = yield self.client_stream.read_until(b"\r\n")
        self.assertEqual(line, recv_line)

    def client_start_tls(self, ssl_options=None, server_hostname=None):
        client_stream = self.client_stream
        self.client_stream = None
        return client_stream.start_tls(False, ssl_options, server_hostname)

    def server_start_tls(self, ssl_options=None):
        server_stream = self.server_stream
        self.server_stream = None
        return server_stream.start_tls(True, ssl_options)

    @gen_test
    def test_start_tls_smtp(self):
        # This flow is simplified from RFC 3207 section 5.
        # We don't really need all of this, but it helps to make sure
        # that after realistic back-and-forth traffic the buffers end up
        # in a sane state.
        yield self.server_send_line(b"220 mail.example.com ready\r\n")
        yield self.client_send_line(b"EHLO mail.example.com\r\n")
        yield self.server_send_line(b"250-mail.example.com welcome\r\n")
        yield self.server_send_line(b"250 STARTTLS\r\n")
        yield self.client_send_line(b"STARTTLS\r\n")
        yield self.server_send_line(b"220 Go ahead\r\n")
        client_future = self.client_start_tls(dict(cert_reqs=ssl.CERT_NONE))
        server_future = self.server_start_tls(_server_ssl_options())
        self.client_stream = yield client_future
        self.server_stream = yield server_future
        self.assertTrue(isinstance(self.client_stream, SSLIOStream))
        self.assertTrue(isinstance(self.server_stream, SSLIOStream))
        yield self.client_send_line(b"EHLO mail.example.com\r\n")
        yield self.server_send_line(b"250 mail.example.com welcome\r\n")

    @gen_test
    def test_handshake_fail(self):
        server_future = self.server_start_tls(_server_ssl_options())
        # Certificates are verified with the default configuration.
        client_future = self.client_start_tls(server_hostname="localhost")
        with ExpectLog(gen_log, "SSL Error"):
            with self.assertRaises(ssl.SSLError):
                yield client_future
        with self.assertRaises((ssl.SSLError, socket.error)):
            yield server_future

    @unittest.skipIf(not hasattr(ssl, 'create_default_context'),
                     'ssl.create_default_context not present')
    @gen_test
    def test_check_hostname(self):
        # Test that server_hostname parameter to start_tls is being used.
        # The check_hostname functionality is only available in
        # Python 2.7.9+ and Python 3.4+.
        server_future = self.server_start_tls(_server_ssl_options())
        client_future = self.client_start_tls(
            ssl.create_default_context(),
            server_hostname=b'127.0.0.1')
        with ExpectLog(gen_log, "SSL Error"):
            with self.assertRaises(ssl.SSLError):
                # The client fails to connect with an SSL error.
                yield client_future
        with self.assertRaises(Exception):
            # The server fails to connect, but the exact error is unspecified.
            yield server_future
Example #35
File: wsgi.py Project: niklas6543/ships
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.

    .. deprecated:: 4.0

       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
       This class will be removed in Tornado 6.0.
    """
    def __call__(self, environ, start_response):
        return WSGIAdapter(self)(environ, start_response)


# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it.
_dummy_future = Future()
_dummy_future.set_result(None)


class _WSGIConnection(httputil.HTTPConnection):
    def __init__(self, method, start_response, context):
        self.method = method
        self.start_response = start_response
        self.context = context
        self._write_buffer = []
        self._finished = False
        self._expected_content_remaining = None
        self._error = None

    def set_close_callback(self, callback):
        # WSGI has no facility for detecting a closed connection mid-request,
Example #36
class EC2Pool:
    """Initialize a pool for instance allocation and recycling.

    All instances allocated using this pool will be tagged as follows:

    Name
        loads-BROKER_ID
    Broker
        BROKER_ID

    Instances in use by a run are tagged with the additional tags:

    RunId
        RUN_ID
    Uuid
        STEP_ID

    .. warning::

        This instance is **NOT SAFE FOR CONCURRENT USE BY THREADS**.

    """
    def __init__(self, broker_id, access_key=None, secret_key=None,
                 key_pair="loads", security="loads", max_idle=600,
                 user_data=None, io_loop=None, port=None,
                 owner_id="595879546273", use_filters=True):
        self.owner_id = owner_id
        self.use_filters = use_filters
        self.broker_id = broker_id
        self.access_key = access_key
        self.secret_key = secret_key
        self.max_idle = max_idle
        self.key_pair = key_pair
        self.security = security
        self.user_data = user_data
        self._instances = defaultdict(list)
        self._tag_filters = {"tag:Name": "loads-%s*" % self.broker_id,
                             "tag:Project": "loads"}
        self._conns = {}
        self._recovered = {}
        self._executor = concurrent.futures.ThreadPoolExecutor(15)
        self._loop = io_loop or tornado.ioloop.IOLoop.instance()
        self.port = port
        # see https://github.com/boto/boto/issues/2617
        if port is not None:
            self.is_secure = port == 443
        else:
            self.is_secure = True

        # Asynchronously initialize ourself when the pool runs
        self._loop.add_future(
            gen.convert_yielded(self.initialize()),
            self._initialized
        )

        self.ready = Future()

    def shutdown(self):
        """Make sure we shutdown the executor.
        """
        self._executor.shutdown()

    def _run_in_executor(self, func, *args, **kwargs):
        return to_tornado_future(self._executor.submit(func, *args, **kwargs))

    def initialize(self):
        """Fully initialize the AWS pool and dependencies, recover existing
        instances, etc.

        :returns: A future that will require the loop running to retrieve.

        """
        logger.debug("Pulling CoreOS AMI info...")
        populate_ami_ids(self.access_key, self.secret_key, port=self.port,
                         owner_id=self.owner_id, use_filters=self.use_filters)
        return self._recover()

    def _initialized(self, future):
        # Run the result to ensure we raise an exception if any occurred
        logger.debug("Finished initializing: %s.", future.result())
        self.ready.set_result(True)

    async def _region_conn(self, region=None):
        if region in self._conns:
            return self._conns[region]

        # Setup a connection
        conn = await self._run_in_executor(
            connect_to_region, region,
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret_key,
            port=self.port, is_secure=self.is_secure)

        self._conns[region] = conn
        return conn

    async def _recover_region(self, region):
        """Recover all the instances in a region"""
        conn = await self._region_conn(region)

        if self.use_filters:
            filters = self._tag_filters
        else:
            filters = {}

        instances = await self._run_in_executor(
            conn.get_only_instances,
            filters=filters)

        return instances

    async def _recover(self):
        """Recover allocated instances from EC2."""
        recovered_instances = defaultdict(list)

        # Recover every region at once
        instancelist = await gen.multi(
            [self._recover_region(x) for x in AWS_REGIONS])

        logger.debug("Found %s instances to look at for recovery.",
                     sum(map(len, instancelist)))

        allocated = 0
        not_used = 0

        for instances in instancelist:
            for instance in instances:
                # skipping terminated instances
                if instance.state == 'terminated':
                    continue
                tags = instance.tags
                region = instance.region.name
                logger.debug('- %s (%s)' % (instance.id, region))
                # If this has been 'pending' too long, we put it in the main
                # instance pool for later reaping
                if not available_instance(instance):
                    self._instances[region].append(instance)
                    continue

                if tags.get("RunId") and tags.get("Uuid"):
                    # Put allocated instances into a recovery pool separate
                    # from unallocated
                    inst_key = (tags["RunId"], tags["Uuid"])
                    recovered_instances[inst_key].append(instance)
                    allocated += 1
                else:
                    self._instances[region].append(instance)
                    not_used += 1

        logger.debug("%d instances were allocated to a run" % allocated)
        logger.debug("%d instances were not used" % not_used)

        self._recovered = recovered_instances

    def _locate_recovered_instances(self, run_id, uuid):
        """Locates and removes existing allocated instances if any"""
        key = run_id, uuid

        if key not in self._recovered:
            # XXX do we want to raise here?
            return []

        instances = self._recovered[key]
        del self._recovered[key]
        return instances

    def _locate_existing_instances(self, count, inst_type, region):
        """Locates and removes existing available instances if any."""
        region_instances = self._instances[region]
        instances = []
        remaining = []

        for inst in region_instances:
            if available_instance(inst) and inst_type == inst.instance_type:
                instances.append(inst)
            else:
                remaining.append(inst)

            if len(instances) >= count:
                break

        # Determine how many were removed, and reconstruct the unallocated
        # instance list with the instances not used
        removed = len(instances) + len(remaining)
        self._instances[region] = region_instances[removed:] + remaining
        return instances

    async def _allocate_instances(self, conn, count, inst_type, region):
        """Allocate a set of new instances and return them."""
        ami_id = get_ami(region, inst_type)
        reservations = await self._run_in_executor(
            conn.run_instances,
            ami_id, min_count=count, max_count=count,
            key_name=self.key_pair, security_groups=[self.security],
            user_data=self.user_data, instance_type=inst_type)

        return reservations.instances

    async def request_instances(self,
                                run_id: str,
                                uuid: str,
                                count=1,
                                inst_type="t1.micro",
                                region="us-west-2",
                                allocate_missing=True,
                                plan: Optional[str] = None,
                                owner: Optional[str] = None,
                                run_max_time: Optional[int] = None):
        """Allocate a collection of instances.

        :param run_id: Run ID for these instances
        :param uuid: UUID to use for this collection
        :param count: How many instances to allocate
        :param inst_type: EC2 instance type the instances should be
        :param region: EC2 region to allocate the instances in
        :param allocate_missing:
            If there are insufficient existing instances for this uuid,
            whether existing or new instances should be allocated to the
            collection.
        :param plan: Name of the instances' plan
        :param owner: Owner name of the instances
        :param run_max_time: Maximum expected run-time of instances in
            seconds
        :returns: Collection of allocated instances
        :rtype: :class:`EC2Collection`

        """
        if region not in AWS_REGIONS:
            raise LoadsException("Unknown region: %s" % region)

        # First attempt to recover instances for this run/uuid
        instances = self._locate_recovered_instances(run_id, uuid)
        remaining_count = count - len(instances)

        conn = await self._region_conn(region)

        # If existing/new are not being allocated, the recovered are
        # already tagged, so we're done.
        if not allocate_missing:
            return EC2Collection(run_id, uuid, conn, instances, self._loop)

        # Add any more remaining that should be used
        instances.extend(
            self._locate_existing_instances(remaining_count, inst_type, region)
        )

        # Determine if we should allocate more instances
        num = count - len(instances)
        if num > 0:
            new_instances = await self._allocate_instances(
                conn, num, inst_type, region)
            logger.debug("Allocated instances%s: %s",
                         " (Owner: %s)" % owner if owner else "",
                         new_instances)
            instances.extend(new_instances)

        # Tag all the instances
        if self.use_filters:
            tags = {
                "Name": "loads-{}{}".format(self.broker_id,
                                            "-" + plan if plan else ""),
                "Project": "loads",
                "RunId": run_id,
                "Uuid": uuid,
            }
            if owner:
                tags["Owner"] = owner
            if run_max_time is not None:
                self._tag_for_reaping(tags, run_max_time)

            # Sometimes, we can get instance data back before the AWS
            # API fully recognizes it, so we wait as needed.
            async def tag_instance(instance):
                retries = 0
                while True:
                    try:
                        await self._run_in_executor(
                            conn.create_tags, [instance.id], tags)
                        break
                    except Exception:
                        if retries > 5:
                            raise
                    retries += 1
                    await gen.Task(self._loop.add_timeout, time.time() + 1)
            await gen.multi([tag_instance(x) for x in instances])
        return EC2Collection(run_id, uuid, conn, instances, self._loop)

    def _tag_for_reaping(self,
                         tags: Dict[str, str],
                         run_max_time: int) -> None:
        """Tag an instance for the mozilla-services reaper

        Set instances to stop after run_max_time + REAPER_DELTA

        """
        now = datetime.utcnow()
        reap = now + timedelta(seconds=run_max_time) + REAPER_DELTA
        tags['REAPER'] = "{}|{:{dfmt}}".format(
            REAPER_STATE, reap, dfmt="%Y-%m-%d %I:%M%p UTC")
        if reap < now + REAPER_FORCE:
            # the reaper ignores instances younger than REAPER_FORCE
            # unless forced w/ REAP_ME
            tags['REAP_ME'] = ""

    async def release_instances(self, collection):
        """Return a collection of instances to the pool.

        :param collection: Collection to return
        :type collection: :class:`EC2Collection`

        """
        # Sometimes a collection ends up with zero instances after pruning
        # dead ones
        if not collection.instances:
            return

        region = collection.instances[0].instance.region.name
        instances = [x.instance for x in collection.instances]

        # De-tag the Run data on these instances
        conn = await self._region_conn(region)

        if self.use_filters:
            await self._run_in_executor(
                conn.create_tags,
                [x.id for x in instances],
                {"RunId": "", "Uuid": ""})

        self._instances[region].extend(instances)

    async def reap_instances(self):
        """Immediately reap all instances."""
        # Remove all the instances before yielding actions
        all_instances = self._instances
        self._instances = defaultdict(list)

        for region, instances in all_instances.items():
            conn = await self._region_conn(region)

            # submit these instances for termination
            await self._run_in_executor(
                conn.terminate_instances,
                [x.id for x in instances])
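A minimal sketch of driving the pool, assuming valid AWS credentials and a running IOLoop; the broker ID, region, and instance type below are illustrative only:

async def run_once():
    pool = EC2Pool("my-broker", access_key="...", secret_key="...")
    await pool.ready  # resolves once _initialized() has run
    collection = await pool.request_instances(
        run_id="run-1", uuid="step-1", count=2,
        inst_type="t1.micro", region="us-west-2")
    try:
        pass  # drive the load test with the collection here
    finally:
        await pool.release_instances(collection)
        pool.shutdown()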
Example #37
    async def launch(self, kube, provider):
        """Ask JupyterHub to launch the image."""
        # Load the spec-specific configuration if it has been overridden
        repo_config = provider.repo_config(self.settings)

        # the image name (without tag) is unique per repo
        # use this to count the number of pods running with a given repo
        # if we added annotations/labels with the repo name via KubeSpawner
        # we could do this better
        image_no_tag = self.image_name.rsplit(':', 1)[0]
        matching_pods = 0
        total_pods = 0

        # TODO: run a watch to keep this up to date in the background
        pool = self.settings['executor']
        f = pool.submit(kube.list_namespaced_pod,
            self.settings["build_namespace"],
            label_selector='app=jupyterhub,component=singleuser-server',
        )
        # concurrent.futures.Future isn't awaitable
        # wrap in tornado Future
        # tornado 5 will have `.run_in_executor`
        tf = Future()
        chain_future(f, tf)
        pods = await tf
        for pod in pods.items:
            total_pods += 1
            for container in pod.spec.containers:
                # is the container running the same image as us?
                # if so, count one for the current repo.
                image = container.image.rsplit(':', 1)[0]
                if image == image_no_tag:
                    matching_pods += 1
                    break

        # TODO: put busy users in a queue rather than fail?
        # That would be hard to do without in-memory state.
        quota = repo_config.get('quota')
        if quota and matching_pods >= quota:
            app_log.error("%s has exceeded quota: %s/%s (%s total)",
                self.repo_url, matching_pods, quota, total_pods)
            await self.fail("Too many users running %s! Try again soon." % self.repo_url)
            return

        if quota and matching_pods >= 0.5 * quota:
            log = app_log.warning
        else:
            log = app_log.info
        log("Launching pod for %s: %s other pods running this repo (%s total)",
            self.repo_url, matching_pods, total_pods)

        await self.emit({
            'phase': 'launching',
            'message': 'Launching server...\n',
        })

        launcher = self.settings['launcher']
        retry_delay = launcher.retry_delay
        for i in range(launcher.retries):
            launch_starttime = time.perf_counter()
            if self.settings['auth_enabled']:
                # get logged in user's name
                user_model = self.hub_auth.get_user(self)
                username = user_model['name']
                if launcher.allow_named_servers:
                    # user can launch multiple servers, so create a unique server name
                    server_name = launcher.unique_name_from_repo(self.repo_url)
                else:
                    server_name = ''
            else:
                # create a name for temporary user
                username = launcher.unique_name_from_repo(self.repo_url)
                server_name = ''
            try:
                extra_args = {
                    'binder_ref_url': self.ref_url,
                    'binder_launch_host': self.binder_launch_host,
                    'binder_request': self.binder_request,
                    'binder_persistent_request': self.binder_persistent_request,
                }
                server_info = await launcher.launch(image=self.image_name,
                                                    username=username,
                                                    server_name=server_name,
                                                    repo_url=self.repo_url,
                                                    extra_args=extra_args)
                LAUNCH_TIME.labels(
                    status='success', retries=i,
                ).observe(time.perf_counter() - launch_starttime)
                LAUNCH_COUNT.labels(
                    status='success', **self.repo_metric_labels,
                ).inc()

            except Exception as e:
                if i + 1 == launcher.retries:
                    status = 'failure'
                else:
                    status = 'retry'
                # don't count retries in failure/retry
                # retry count is only interesting in success
                LAUNCH_TIME.labels(
                    status=status, retries=-1,
                ).observe(time.perf_counter() - launch_starttime)
                if status == 'failure':
                    # don't count retries per repo
                    LAUNCH_COUNT.labels(
                        status=status, **self.repo_metric_labels,
                    ).inc()

                if i + 1 == launcher.retries:
                    # last attempt failed, let it raise
                    raise

                # not the last attempt, try again
                app_log.error("Retrying launch after error: %s", e)
                await self.emit({
                    'phase': 'launching',
                    'message': 'Launch attempt {} failed, retrying...\n'.format(i + 1),
                })
                await gen.sleep(retry_delay)
                # exponential backoff for consecutive failures
                retry_delay *= 2
                continue
            else:
                # success
                break
        event = {
            'phase': 'ready',
            'message': 'server running at %s\n' % server_info['url'],
        }
        event.update(server_info)
        await self.emit(event)
Example #38
    def post_request(self,
                     path,
                     file_obj,
                     file_size=None,
                     content_type=None,
                     content_encoding=None):
        """
        Send the POST request.

        A POST request is made up of one headers frame, and then 0+ data
        frames. This method begins by sending the headers, and then starts a
        series of calls to send data.
        """

        # if not self.settings_acked_future.done():
        #     print("Settings haven't been acked, yield until they are")
        #     yield self.settings_acked_future
        #     print("Settings acked! Let's send this pending post request")

        if isinstance(file_obj, str):
            file_size = len(file_obj)
            file_obj = StringIO(file_obj)

        # Now we can build a header block.
        request_headers = [
            (':method', 'POST'),
            (':authority', self.authority),
            (':scheme', 'https'),
            (':path', path),
            ('user-agent', self.USER_AGENT),
            ('content-length', str(file_size)),
        ]

        if content_type is not None:
            request_headers.append(('content-type', content_type))

            if content_encoding is not None:
                request_headers.append(('content-encoding', content_encoding))

        stream_id = self.conn.get_next_available_stream_id()

        self.conn.send_headers(stream_id, request_headers)

        # We now need to send all the relevant data. We do this by checking
        # what the acceptable amount of data is to send, and sending it. If we
        # find ourselves blocked behind flow control, we then place a deferred
        # and wait until that deferred fires.

        response_future = Future()

        self.responses[stream_id] = {'future': response_future}

        # We now need to send a number of data frames.
        try:
            while file_size > 0:
                # Firstly, check what the flow control window is for the current stream.
                window_size = self.conn.local_flow_control_window(
                    stream_id=stream_id)

                # Next, check what the maximum frame size is.
                max_frame_size = self.conn.max_outbound_frame_size

                # We will send no more than the window size or the remaining file size
                # of data in this call, whichever is smaller.
                bytes_to_send = min(window_size, file_size)

                while bytes_to_send > 0:
                    chunk_size = min(bytes_to_send, max_frame_size)
                    data_chunk = file_obj.read(chunk_size)
                    self.conn.send_data(stream_id=stream_id, data=data_chunk)

                    yield self.io_stream.write(self.conn.data_to_send())

                    bytes_to_send -= chunk_size
                    file_size -= chunk_size
        except StreamClosedError:
            logger.warning(
                "Connection was lost while sending stream {}".format(
                    stream_id))
        else:
            self.conn.end_stream(stream_id=stream_id)
        finally:
            file_obj.close()

        result = yield response_future
        raise gen.Return(result)
Example #39
File: wsgi.py Project: Liu0330/zufang
def _dummy_future():
    f = Future()
    f.set_result(None)
    return f
Example #40
 def start(self):
     """Return a Future that will never finish"""
     return Future()
Example #41
 def should_fail_api():
     future = Future()
     future.set_exception(c.TTransportException())
     return future
Example #42
def sleep(duration):
    """Return a `.Future` that resolves after the given number of seconds.

    When used with ``yield`` in a coroutine, this is a non-blocking
    analogue to `time.sleep` (which should not be used in coroutines
    because it is blocking)::

        yield gen.sleep(0.5)

    Note that calling this function on its own does nothing; you must
    wait on the `.Future` it returns (usually by yielding it).

    .. versionadded:: 4.1
    """
    f = Future()
    IOLoop.current().call_later(duration, lambda: f.set_result(None))
    return f


_null_future = Future()
_null_future.set_result(None)

moment = Future()
moment.__doc__ = \
    """A special object which may be yielded to allow the IOLoop to run for
one iteration.

This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.

Usage: ``yield gen.moment``

.. versionadded:: 4.0
"""
moment.set_result(None)
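A short usage sketch combining gen.sleep and gen.moment in a coroutine:

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def heartbeat():
    for i in range(3):
        print("tick", i)
        yield gen.sleep(0.5)  # non-blocking pause
        yield gen.moment      # give other callbacks a chance to run

IOLoop.current().run_sync(heartbeat)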
Example #43
class AsyncProcess(object):
    """
    A coroutine-compatible multiprocessing.Process-alike.
    All normally blocking methods are wrapped in Tornado coroutines.
    """

    def __init__(self, loop=None, target=None, name=None, args=(), kwargs={}):
        if not callable(target):
            raise TypeError("`target` needs to be callable, not %r" % (type(target),))
        self._state = _ProcessState()
        self._loop = loop or IOLoop.current(instance=False)

        # _keep_child_alive is the write side of a pipe, which, when it is
        # closed, causes the read side of the pipe to unblock for reading. Note
        # that it is never closed directly. The write side is closed by the
        # kernel when our process exits, or possibly by the garbage collector
        # closing the file descriptor when the last reference to
        # _keep_child_alive goes away. We can take advantage of this fact to
        # monitor from the child and exit when the parent goes away unexpectedly
        # (for example due to SIGKILL). This variable is otherwise unused except
        # for the assignment here.
        parent_alive_pipe, self._keep_child_alive = mp_context.Pipe(duplex=False)

        self._process = mp_context.Process(
            target=self._run,
            name=name,
            args=(target, args, kwargs, parent_alive_pipe, self._keep_child_alive),
        )
        _dangling.add(self._process)
        self._name = self._process.name
        self._watch_q = PyQueue()
        self._exit_future = Future()
        self._exit_callback = None
        self._closed = False

        self._start_threads()

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self._name)

    def _check_closed(self):
        if self._closed:
            raise ValueError("invalid operation on closed AsyncProcess")

    def _start_threads(self):
        self._watch_message_thread = threading.Thread(
            target=self._watch_message_queue,
            name="AsyncProcess %s watch message queue" % self.name,
            args=(
                weakref.ref(self),
                self._process,
                self._loop,
                self._state,
                self._watch_q,
                self._exit_future,
            ),
        )
        self._watch_message_thread.daemon = True
        self._watch_message_thread.start()

        def stop_thread(q):
            q.put_nowait({"op": "stop"})
            # We don't join the thread here as a finalizer can be called
            # asynchronously from anywhere

        self._finalizer = finalize(self, stop_thread, q=self._watch_q)
        self._finalizer.atexit = False

    def _on_exit(self, exitcode):
        # Called from the event loop when the child process exited
        self._process = None
        if self._exit_callback is not None:
            self._exit_callback(self)
        self._exit_future.set_result(exitcode)

    @classmethod
    def _immediate_exit_when_closed(cls, parent_alive_pipe):
        """
        Immediately exit the process when parent_alive_pipe is closed.
        """

        def monitor_parent():
            try:
                # The parent_alive_pipe should be held open as long as the
                # parent is alive and wants us to stay alive. Nothing writes to
                # it, so the read will block indefinitely.
                parent_alive_pipe.recv()
            except EOFError:
                # Parent process went away unexpectedly. Exit immediately. Could
                # consider other exiting approaches here. My initial preference
                # is to unconditionally and immediately exit. If we're in this
                # state it is possible that a "clean" process exit won't work
                # anyway - if, for example, the system is getting bogged down
                # due to running out of memory, exiting sooner rather than
                # later might be needed to restore normal system function.
                # If this is inappropriate for your use case, please file a
                # bug.
                os._exit(-1)
            else:
                # If we get here, something odd is going on. File descriptors
                # got crossed?
                raise RuntimeError("unexpected state: should be unreachable")

        t = threading.Thread(target=monitor_parent)
        t.daemon = True
        t.start()

    @staticmethod
    def reset_logger_locks():
        """ Python 2's logger's locks don't survive a fork event

        https://github.com/dask/distributed/issues/1491
        """
        for name in logging.Logger.manager.loggerDict.keys():
            for handler in logging.getLogger(name).handlers:
                handler.createLock()

    @classmethod
    def _run(cls, target, args, kwargs, parent_alive_pipe, _keep_child_alive):
        # On Python 2 with the fork method, we inherit the _keep_child_alive fd,
        # whether it is passed or not. Therefore, pass it unconditionally and
        # close it here, so that there are no other references to the pipe lying
        # around.
        cls.reset_logger_locks()

        _keep_child_alive.close()

        # Child process entry point
        cls._immediate_exit_when_closed(parent_alive_pipe)

        threading.current_thread().name = "MainThread"
        target(*args, **kwargs)

    @classmethod
    def _watch_message_queue(cls, selfref, process, loop, state, q, exit_future):
        # As multiprocessing.Process is not thread-safe, we run all
        # blocking operations from this single loop and ship results
        # back to the caller when needed.
        r = repr(selfref())
        name = selfref().name

        def _start():
            process.start()

            thread = threading.Thread(
                target=AsyncProcess._watch_process,
                name="AsyncProcess %s watch process join" % name,
                args=(selfref, process, state, q),
            )
            thread.daemon = True
            thread.start()

            state.is_alive = True
            state.pid = process.pid
            logger.debug("[%s] created process with pid %r" % (r, state.pid))

        while True:
            msg = q.get()
            logger.debug("[%s] got message %r" % (r, msg))
            op = msg["op"]
            if op == "start":
                _call_and_set_future(loop, msg["future"], _start)
            elif op == "terminate":
                _call_and_set_future(loop, msg["future"], process.terminate)
            elif op == "stop":
                break
            else:
                assert 0, msg

    @classmethod
    def _watch_process(cls, selfref, process, state, q):
        r = repr(selfref())
        process.join()
        exitcode = process.exitcode
        assert exitcode is not None
        logger.debug("[%s] process %r exited with code %r", r, state.pid, exitcode)
        state.is_alive = False
        state.exitcode = exitcode
        # Make sure the process is removed from the global list
        # (see _children in multiprocessing/process.py)
        # Then notify the Process object
        self = selfref()  # only keep self alive when required
        try:
            if self is not None:
                _loop_add_callback(self._loop, self._on_exit, exitcode)
        finally:
            self = None  # lose reference

    def start(self):
        """
        Start the child process.

        This method is a coroutine.
        """
        self._check_closed()
        fut = Future()
        self._watch_q.put_nowait({"op": "start", "future": fut})
        return fut

    def terminate(self):
        """
        Terminate the child process.

        This method is a coroutine.
        """
        self._check_closed()
        fut = Future()
        self._watch_q.put_nowait({"op": "terminate", "future": fut})
        return fut

    @gen.coroutine
    def join(self, timeout=None):
        """
        Wait for the child process to exit.

        This method is a coroutine.
        """
        self._check_closed()
        assert self._state.pid is not None, "can only join a started process"
        if self._state.exitcode is not None:
            return
        if timeout is None:
            yield self._exit_future
        else:
            try:
                yield gen.with_timeout(timedelta(seconds=timeout), self._exit_future)
            except gen.TimeoutError:
                pass

    def close(self):
        """
        Stop helper thread and release resources.  This method returns
        immediately and does not ensure the child process has exited.
        """
        if not self._closed:
            self._finalizer()
            self._process = None
            self._closed = True

    def set_exit_callback(self, func):
        """
        Set a function to be called by the event loop when the process exits.
        The function is called with the AsyncProcess as sole argument.

        The function may be a coroutine function.
        """
        # XXX should this be a property instead?
        assert callable(func), "exit callback should be callable"
        assert (
            self._state.pid is None
        ), "cannot set exit callback when process already started"
        self._exit_callback = func

    def is_alive(self):
        return self._state.is_alive

    @property
    def pid(self):
        return self._state.pid

    @property
    def exitcode(self):
        return self._state.exitcode

    @property
    def name(self):
        return self._name

    @property
    def daemon(self):
        return self._process.daemon

    @daemon.setter
    def daemon(self, value):
        self._process.daemon = value
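
A quick usage sketch for the class above. The constructor is not shown in this excerpt, so the target=/args= signature (mirroring multiprocessing.Process) is an assumption here; start() and terminate() return Futures to yield on, while join() is itself a coroutine.

# Hedged usage sketch: assumes AsyncProcess(target=..., args=...) mirrors
# multiprocessing.Process, which this excerpt does not show.
from tornado import gen
from tornado.ioloop import IOLoop

def work(n):
    print("child computed", n * n)

@gen.coroutine
def main():
    proc = AsyncProcess(target=work, args=(6,))
    yield proc.start()           # resolves once the watcher thread spawned the child
    yield proc.join(timeout=10)  # coroutine; returns when the child exits
    print("exit code:", proc.exitcode)
    proc.close()                 # stop the helper thread and release resources

IOLoop.current().run_sync(main)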
示例#44
0
    def should_fail_api():
        future = Future()
        future.set_exception(
            self.pingpong_thrift_client.service.AboutToShutDownException())
        return future
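
The snippet above resolves a Future with an exception so that callers of a stubbed async API see the failure at yield time. A self-contained variant of the same pattern; the exception class and client.ping below are illustrative stand-ins, not names from the snippet's project.

from unittest import mock
from tornado.concurrent import Future

class AboutToShutDownException(Exception):
    """Stand-in for the service's real exception type."""

def should_fail_api():
    future = Future()
    future.set_exception(AboutToShutDownException())
    return future

client = mock.MagicMock()
client.ping = mock.MagicMock(side_effect=should_fail_api)
# In a coroutine, `yield client.ping()` now raises AboutToShutDownException.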
示例#45
0
class NotebookAPIHandler(TokenAuthorizationMixin,
                         CORSMixin,
                         JSONErrorsMixin,
                         tornado.web.RequestHandler):
    """Executes code from a notebook cell in response to HTTP requests at the
    route registered in association with this class.

    Supports the GET, POST, PUT, and DELETE HTTP methods.

    Parameters
    ----------
    sources : dict
        Maps HTTP verb strings to annotated cells extracted from a notebook
    response_sources
        Maps HTTP verb strings to ResponseInfo annotated cells extracted from a
        notebook
    kernel_pool
        Instance of services.kernels.ManagedKernelPool
    kernel_name
        Kernel spec name used to launch the kernel pool
    kernel_language
        Kernel spec language used to make language specific operations

    Attributes
    ----------
    See parameters: they are stored as passed as instance variables.

    See Also
    --------
    services.cell.parser.APICellParser for detail about how the source cells
    are identified, parsed, and associated with HTTP verbs and paths.
    """
    def initialize(self, sources, response_sources, kernel_pool, kernel_name, kernel_language=''):
        self.kernel_pool = kernel_pool
        self.sources = sources
        self.kernel_name = kernel_name
        self.response_sources = response_sources
        self.kernel_language = kernel_language

    def finish_future(self, future, result_accumulator):
        """Resolves the promise to respond to a HTTP request handled by a
        kernel in the pool.

        Defines precedence for the kind of response:

        1. If any error occurred, resolve with a CodeExecutionError exception
        2. If any stream message was collected from the kernel, resolve with
            the joined string of stream messages
        3. If an execute_result was collected, resolve with that result object
            JSON encoded
        4. Resolve with an empty string

        Parameters
        ----------
        future : tornado.concurrent.Future
            Promise of a future response to an API request
        result_accumulator : dict
            Dictionary of results from a kernel with at least the keys error,
            stream, and result
        """
        if result_accumulator['error']:
            future.set_exception(CodeExecutionError(result_accumulator['error']))
        elif len(result_accumulator['stream']) > 0:
            future.set_result(''.join(result_accumulator['stream']))
        elif result_accumulator['result']:
            future.set_result(json.dumps(result_accumulator['result']))
        else:
            # If nothing was set, return an empty value
            future.set_result('')

    def on_recv(self, result_accumulator, future, parent_header, msg):
        """Collects ipoub messages associated with code execution request
        identified by `parent_header`.

        Continues collecting until an execution state of idle is reached.
        The first three parameters are typically applied in a partial.

        Parameters
        ----------
        result_accumulator : dict
            Accumulates data from `execute_result`, `stream` and `error`
            messages under keys `result`, `stream`, and `error` respectively
            across multiple invocations of this callback.
        future : tornado.concurrent.Future
            Promise to resolve when the kernel goes idle
        parent_header : dict
            Parent header from an `execute` request, used to identify messages
            that relate to its execution vs other executions
        msg : dict
            Kernel message received from the iopub channel
        """
        if msg['parent_header']['msg_id'] == parent_header:
            # On idle status, exit our loop
            if msg['header']['msg_type'] == 'status' and msg['content']['execution_state'] == 'idle':
                self.finish_future(future, result_accumulator)
            # Store the execute result
            elif msg['header']['msg_type'] == 'execute_result':
                result_accumulator['result'] = msg['content']['data']
            # Accumulate the stream messages
            elif msg['header']['msg_type'] == 'stream':
                # Only take stream output if it is on stdout or if the kernel
                # is non-conforming and does not name the stream
                if 'name' not in msg['content'] or msg['content']['name'] == 'stdout':
                    result_accumulator['stream'].append((msg['content']['text']))
            # Store the error message
            elif msg['header']['msg_type'] == 'error':
                error_name = msg['content']['ename']
                error_value = msg['content']['evalue']
                result_accumulator['error'] = 'Error {}: {} \n'.format(error_name, error_value)

    def execute_code(self, kernel_client, kernel_id, source_code):
        """Executes `source_code` on the kernel specified.

        Registers a callback for iopub messages. Promises to return the output
        of the execution in the future after the kernel returns to its idle
        state.

        Parameters
        ----------
        kernel_client : object
            Client to use to execute the code
        kernel_id : str
            ID of the kernel from the pool that will execute the request
        source_code : str
            Source code to execute

        Returns
        -------
        tornado.concurrent.Future
            Promise of execution result

        Raises
        ------
        CodeExecutionError
            If the kernel returns any error
        """
        future = Future()
        result_accumulator = {'stream' : [], 'error' : None, 'result' : None}
        parent_header = kernel_client.execute(source_code)
        on_recv_func = partial(self.on_recv, result_accumulator, future, parent_header)
        self.kernel_pool.on_recv(kernel_id, on_recv_func)
        return future

    @gen.coroutine
    def _handle_request(self):
        """Turns an HTTP request into annotated notebook code to execute on a
        kernel.

        Sets the HTTP response code, headers, and response body based on the
        result of the kernel execution. Then finishes the Tornado response.
        """
        self.response_future = Future()
        kernel_client, kernel_id = yield self.kernel_pool.acquire()
        try:
            # Method not supported
            if self.request.method not in self.sources:
                raise UnsupportedMethodError(self.request.method)

            # Set the Content-Type and status to default values
            self.set_header('Content-Type', 'text/plain')
            self.set_status(200)

            # Get the source to execute in response to this request
            source_code = self.sources[self.request.method]
            # Build the request dictionary
            request = json.dumps({
                'body' : parse_body(self.request),
                'args' : parse_args(self.request.query_arguments),
                'path' : self.path_kwargs,
                'headers' : headers_to_dict(self.request.headers)
            })
            # Turn the request string into a valid code string
            request_code = format_request(request, self.kernel_language)

            # Run the request and source code and yield until there's a result
            access_log.debug('Request code for notebook cell is: {}'.format(request_code))
            yield self.execute_code(kernel_client, kernel_id, request_code)
            source_result = yield self.execute_code(kernel_client, kernel_id, source_code)

            # If a response code cell exists, execute it
            if self.request.method in self.response_sources:
                response_code = self.response_sources[self.request.method]
                response_future = self.execute_code(kernel_client, kernel_id, response_code)

                # Wait for the response and parse the json value
                response_result = yield response_future
                response = json.loads(response_result)

                # Copy all the header values into the tornado response
                if 'headers' in response:
                    for header in response['headers']:
                        self.set_header(header, response['headers'][header])

                # Set the status code if it exists
                if 'status' in response:
                    self.set_status(response['status'])

            # Write the result of the source code execution
            if source_result:
                self.write(source_result)
        # If there was a problem executing the code, return a 500
        except CodeExecutionError as err:
            self.write(str(err))
            self.set_status(500)
        # An unsupported method was called on this handler
        except UnsupportedMethodError:
            self.set_status(405)
        finally:
            # Always make sure we release the kernel and finish the request
            self.response_future.set_result(None)
            self.kernel_pool.release(kernel_id)
            self.finish()

    @gen.coroutine
    def get(self, **kwargs):
        self._handle_request()
        yield self.response_future

    @gen.coroutine
    def post(self, **kwargs):
        self._handle_request()
        yield self.response_future

    @gen.coroutine
    def put(self, **kwargs):
        self._handle_request()
        yield self.response_future

    @gen.coroutine
    def delete(self, **kwargs):
        self._handle_request()
        yield self.response_future

    def options(self, **kwargs):
        self.finish()
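
The precedence encoded in finish_future (error, then stream output, then execute_result, then an empty string) can be exercised in isolation. A minimal sketch of the same rules; RuntimeError stands in for CodeExecutionError to keep it self-contained.

import json
from tornado.concurrent import Future

def finish_future(future, acc):
    # Same precedence as the handler above: error > stream > result > empty.
    if acc['error']:
        future.set_exception(RuntimeError(acc['error']))
    elif acc['stream']:
        future.set_result(''.join(acc['stream']))
    elif acc['result']:
        future.set_result(json.dumps(acc['result']))
    else:
        future.set_result('')

f = Future()
finish_future(f, {'stream': ['a\n', 'b\n'], 'error': None, 'result': {'x': 1}})
print(f.result())  # 'a\nb\n' -- stream output wins over the execute_result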
示例#46
0
    def mock_method(*args):
        future = Future()
        future.set_result(None)
        return future
示例#47
0
def create_future(ret_val=None):
    future = Future()
    future.set_result(ret_val)
    return future
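
A typical use of this helper in tests: stub a coroutine-returning method so that a yield on it resolves immediately. The names below are illustrative.

from unittest import mock

users = mock.MagicMock()
users.get_drivers_within_distance = mock.MagicMock(
    return_value=create_future([]))
# In a coroutine: `yield users.get_drivers_within_distance(...)` -> []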
示例#48
0
class ZMQChannelsHandler(AuthenticatedZMQStreamHandler):
    '''There is one ZMQChannelsHandler per running kernel and it oversees all
    the sessions.
    '''

    # class-level registry of open sessions
    # allows checking for conflict on session-id,
    # which is used as a zmq identity and must be unique.
    _open_sessions = {}

    @property
    def kernel_info_timeout(self):
        km_default = self.kernel_manager.kernel_info_timeout
        return self.settings.get('kernel_info_timeout', km_default)

    @property
    def iopub_msg_rate_limit(self):
        return self.settings.get('iopub_msg_rate_limit', 0)

    @property
    def iopub_data_rate_limit(self):
        return self.settings.get('iopub_data_rate_limit', 0)

    @property
    def rate_limit_window(self):
        return self.settings.get('rate_limit_window', 1.0)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__,
                           getattr(self, 'kernel_id', 'uninitialized'))

    def create_stream(self):
        km = self.kernel_manager
        identity = self.session.bsession
        for channel in ('shell', 'control', 'iopub', 'stdin'):
            meth = getattr(km, 'connect_' + channel)
            self.channels[channel] = stream = meth(self.kernel_id,
                                                   identity=identity)
            stream.channel = channel

    def request_kernel_info(self):
        """send a request for kernel_info"""
        km = self.kernel_manager
        kernel = km.get_kernel(self.kernel_id)
        try:
            # check for previous request
            future = kernel._kernel_info_future
        except AttributeError:
            self.log.debug("Requesting kernel info from %s", self.kernel_id)
            # Create a kernel_info channel to query the kernel protocol version.
            # This channel will be closed after the kernel_info reply is received.
            if self.kernel_info_channel is None:
                self.kernel_info_channel = km.connect_shell(self.kernel_id)
            self.kernel_info_channel.on_recv(self._handle_kernel_info_reply)
            self.session.send(self.kernel_info_channel, "kernel_info_request")
            # store the future on the kernel, so only one request is sent
            kernel._kernel_info_future = self._kernel_info_future
        else:
            if not future.done():
                self.log.debug("Waiting for pending kernel_info request")
            future.add_done_callback(
                lambda f: self._finish_kernel_info(f.result()))
        return self._kernel_info_future

    def _handle_kernel_info_reply(self, msg):
        """process the kernel_info_reply

        enabling msg spec adaptation, if necessary
        """
        idents, msg = self.session.feed_identities(msg)
        try:
            msg = self.session.deserialize(msg)
        except Exception:
            self.log.error("Bad kernel_info reply", exc_info=True)
            self._kernel_info_future.set_result({})
            return
        else:
            info = msg['content']
            self.log.debug("Received kernel info: %s", info)
            if msg['msg_type'] != 'kernel_info_reply' or 'protocol_version' not in info:
                self.log.error(
                    "Kernel info request failed, assuming current %s", info)
                info = {}
            self._finish_kernel_info(info)

        # close the kernel_info channel, we don't need it anymore
        if self.kernel_info_channel:
            self.kernel_info_channel.close()
        self.kernel_info_channel = None

    def _finish_kernel_info(self, info):
        """Finish handling kernel_info reply

        Set up protocol adaptation, if needed,
        and signal that connection can continue.
        """
        protocol_version = info.get('protocol_version',
                                    client_protocol_version)
        if protocol_version != client_protocol_version:
            self.session.adapt_version = int(protocol_version.split('.')[0])
            self.log.info("Adapting to protocol v%s for kernel %s",
                          protocol_version, self.kernel_id)
        if not self._kernel_info_future.done():
            self._kernel_info_future.set_result(info)

    def initialize(self):
        super(ZMQChannelsHandler, self).initialize()
        self.zmq_stream = None
        self.channels = {}
        self.kernel_id = None
        self.kernel_info_channel = None
        self._kernel_info_future = Future()
        self._close_future = Future()
        self.session_key = ''

        # Rate limiting code
        self._iopub_window_msg_count = 0
        self._iopub_window_byte_count = 0
        self._iopub_msgs_exceeded = False
        self._iopub_data_exceeded = False
        # Queue of (time stamp, byte count)
        # Allows you to specify that the byte count should be lowered
        # by a delta amount at some point in the future.
        self._iopub_window_byte_queue = []

    @gen.coroutine
    def pre_get(self):
        # authenticate first
        super(ZMQChannelsHandler, self).pre_get()
        # check session collision:
        yield self._register_session()
        # then request kernel info, waiting up to a certain time before giving up.
        # We don't want to wait forever, because browsers don't take it well when
        # servers never respond to websocket connection requests.
        kernel = self.kernel_manager.get_kernel(self.kernel_id)
        self.session.key = kernel.session.key
        future = self.request_kernel_info()

        def give_up():
            """Don't wait forever for the kernel to reply"""
            if future.done():
                return
            self.log.warning("Timeout waiting for kernel_info reply from %s",
                             self.kernel_id)
            future.set_result({})

        loop = IOLoop.current()
        loop.add_timeout(loop.time() + self.kernel_info_timeout, give_up)
        # actually wait for it
        yield future

    @gen.coroutine
    def get(self, kernel_id):
        self.kernel_id = cast_unicode(kernel_id, 'ascii')
        yield super(ZMQChannelsHandler, self).get(kernel_id=kernel_id)

    @gen.coroutine
    def _register_session(self):
        """Ensure we aren't creating a duplicate session.

        If a previous identical session is still open, close it to avoid collisions.
        This is likely due to a client reconnecting from a lost network connection,
        where the socket on our side has not been cleaned up yet.
        """
        self.session_key = '%s:%s' % (self.kernel_id, self.session.session)
        stale_handler = self._open_sessions.get(self.session_key)
        if stale_handler:
            self.log.warning("Replacing stale connection: %s",
                             self.session_key)
            yield stale_handler.close()
        self._open_sessions[self.session_key] = self

    def open(self, kernel_id):
        super(ZMQChannelsHandler, self).open()
        km = self.kernel_manager
        km.notify_connect(kernel_id)

        # on new connections, flush the message buffer
        buffer_info = km.get_buffer(kernel_id, self.session_key)
        if buffer_info and buffer_info['session_key'] == self.session_key:
            self.log.info("Restoring connection for %s", self.session_key)
            self.channels = buffer_info['channels']
            replay_buffer = buffer_info['buffer']
            if replay_buffer:
                self.log.info("Replaying %s buffered messages",
                              len(replay_buffer))
                for channel, msg_list in replay_buffer:
                    stream = self.channels[channel]
                    self._on_zmq_reply(stream, msg_list)
        else:
            try:
                self.create_stream()
            except web.HTTPError as e:
                self.log.error("Error opening stream: %s", e)
                # WebSockets don't respond to traditional error codes so we
                # close the connection.
                for channel, stream in self.channels.items():
                    if not stream.closed():
                        stream.close()
                self.close()
                return

        km.add_restart_callback(self.kernel_id, self.on_kernel_restarted)
        km.add_restart_callback(self.kernel_id, self.on_restart_failed, 'dead')

        for channel, stream in self.channels.items():
            stream.on_recv_stream(self._on_zmq_reply)

    def on_message(self, msg):
        if not self.channels:
            # already closed, ignore the message
            self.log.debug("Received message on closed websocket %r", msg)
            return
        if isinstance(msg, bytes):
            msg = deserialize_binary_message(msg)
        else:
            msg = json.loads(msg)
        channel = msg.pop('channel', None)
        if channel is None:
            self.log.warning("No channel specified, assuming shell: %s", msg)
            channel = 'shell'
        if channel not in self.channels:
            self.log.warning("No such channel: %r", channel)
            return
        am = self.kernel_manager.allowed_message_types
        mt = msg['header']['msg_type']
        if am and mt not in am:
            self.log.warning(
                'Received message of type "%s", which is not allowed. Ignoring.'
                % mt)
        else:
            stream = self.channels[channel]
            self.session.send(stream, msg)

    def _on_zmq_reply(self, stream, msg_list):
        idents, fed_msg_list = self.session.feed_identities(msg_list)
        msg = self.session.deserialize(fed_msg_list)
        parent = msg['parent_header']

        def write_stderr(error_message):
            self.log.warning(error_message)
            msg = self.session.msg("stream",
                                   content={
                                       "text": error_message + '\n',
                                       "name": "stderr"
                                   },
                                   parent=parent)
            msg['channel'] = 'iopub'
            self.write_message(json.dumps(msg, default=date_default))

        channel = getattr(stream, 'channel', None)
        msg_type = msg['header']['msg_type']

        if channel == 'iopub' and msg_type == 'status' and msg['content'].get(
                'execution_state') == 'idle':
            # reset rate limit counter on status=idle,
            # to avoid 'Run All' hitting limits prematurely.
            self._iopub_window_byte_queue = []
            self._iopub_window_msg_count = 0
            self._iopub_window_byte_count = 0
            self._iopub_msgs_exceeded = False
            self._iopub_data_exceeded = False

        if channel == 'iopub' and msg_type not in {
                'status', 'comm_open', 'execute_input'
        }:

            # Remove the counts queued for removal.
            now = IOLoop.current().time()
            while len(self._iopub_window_byte_queue) > 0:
                queued = self._iopub_window_byte_queue[0]
                if (now >= queued[0]):
                    self._iopub_window_byte_count -= queued[1]
                    self._iopub_window_msg_count -= 1
                    del self._iopub_window_byte_queue[0]
                else:
                    # This part of the queue hasn't been reached yet, so we can
                    # abort the loop.
                    break

            # Increment the bytes and message count
            self._iopub_window_msg_count += 1
            if msg_type == 'stream':
                byte_count = sum([len(x) for x in msg_list])
            else:
                byte_count = 0
            self._iopub_window_byte_count += byte_count

            # Queue a removal of the byte and message count for a time in the
            # future, when we are no longer interested in it.
            self._iopub_window_byte_queue.append(
                (now + self.rate_limit_window, byte_count))

            # Check the limits, set the limit flags, and reset the
            # message and data counts.
            msg_rate = float(
                self._iopub_window_msg_count) / self.rate_limit_window
            data_rate = float(
                self._iopub_window_byte_count) / self.rate_limit_window

            # Check the msg rate
            if self.iopub_msg_rate_limit > 0 and msg_rate > self.iopub_msg_rate_limit:
                if not self._iopub_msgs_exceeded:
                    self._iopub_msgs_exceeded = True
                    write_stderr(
                        dedent("""\
                    IOPub message rate exceeded.
                    The Jupyter server will temporarily stop sending output
                    to the client in order to avoid crashing it.
                    To change this limit, set the config variable
                    `--ServerApp.iopub_msg_rate_limit`.

                    Current values:
                    ServerApp.iopub_msg_rate_limit={} (msgs/sec)
                    ServerApp.rate_limit_window={} (secs)
                    """.format(self.iopub_msg_rate_limit,
                               self.rate_limit_window)))
            else:
                # resume once we've got some headroom below the limit
                if self._iopub_msgs_exceeded and msg_rate < (
                        0.8 * self.iopub_msg_rate_limit):
                    self._iopub_msgs_exceeded = False
                    if not self._iopub_data_exceeded:
                        self.log.warning("iopub messages resumed")

            # Check the data rate
            if self.iopub_data_rate_limit > 0 and data_rate > self.iopub_data_rate_limit:
                if not self._iopub_data_exceeded:
                    self._iopub_data_exceeded = True
                    write_stderr(
                        dedent("""\
                    IOPub data rate exceeded.
                    The Jupyter server will temporarily stop sending output
                    to the client in order to avoid crashing it.
                    To change this limit, set the config variable
                    `--ServerApp.iopub_data_rate_limit`.

                    Current values:
                    ServerApp.iopub_data_rate_limit={} (bytes/sec)
                    ServerApp.rate_limit_window={} (secs)
                    """.format(self.iopub_data_rate_limit,
                               self.rate_limit_window)))
            else:
                # resume once we've got some headroom below the limit
                if self._iopub_data_exceeded and data_rate < (
                        0.8 * self.iopub_data_rate_limit):
                    self._iopub_data_exceeded = False
                    if not self._iopub_msgs_exceeded:
                        self.log.warning("iopub messages resumed")

            # If either of the limit flags are set, do not send the message.
            if self._iopub_msgs_exceeded or self._iopub_data_exceeded:
                # we didn't send it; remove the current message from the window counts
                self._iopub_window_msg_count -= 1
                self._iopub_window_byte_count -= byte_count
                self._iopub_window_byte_queue.pop(-1)
                return
        super(ZMQChannelsHandler, self)._on_zmq_reply(stream, msg)

    def close(self):
        super(ZMQChannelsHandler, self).close()
        return self._close_future

    def on_close(self):
        self.log.debug("Websocket closed %s", self.session_key)
        # unregister myself as an open session (only if it's really me)
        if self._open_sessions.get(self.session_key) is self:
            self._open_sessions.pop(self.session_key)

        km = self.kernel_manager
        if self.kernel_id in km:
            km.notify_disconnect(self.kernel_id)
            km.remove_restart_callback(
                self.kernel_id,
                self.on_kernel_restarted,
            )
            km.remove_restart_callback(
                self.kernel_id,
                self.on_restart_failed,
                'dead',
            )

            # start buffering instead of closing if this was the last connection
            if km._kernel_connections[self.kernel_id] == 0:
                km.start_buffering(self.kernel_id, self.session_key,
                                   self.channels)
                self._close_future.set_result(None)
                return

        # This method can be called twice, once by self.kernel_died and once
        # from the WebSocket close event. If the WebSocket connection is
        # closed before the ZMQ streams are set up, they could be None.
        for channel, stream in self.channels.items():
            if stream is not None and not stream.closed():
                stream.on_recv(None)
                stream.close()

        self.channels = {}
        self._close_future.set_result(None)

    def _send_status_message(self, status):
        msg = self.session.msg("status", {'execution_state': status})
        msg['channel'] = 'iopub'
        self.write_message(json.dumps(msg, default=date_default))

    def on_kernel_restarted(self):
        logging.warning("kernel %s restarted", self.kernel_id)
        self._send_status_message('restarting')

    def on_restart_failed(self):
        logging.error("kernel %s restart failed!", self.kernel_id)
        self._send_status_message('dead')
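
The iopub throttling above is a sliding-window rate limiter: each delivered message queues an expiry timestamp, expired entries are subtracted from the window counts, and delivery is suppressed once a rate limit is exceeded, resuming only when the rate drops below 80% of the limit. A stripped-down, message-count-only sketch of the same bookkeeping:

import time

class SlidingWindowLimiter:
    """Counts messages over a trailing window, as the handler above does."""
    def __init__(self, msg_rate_limit, window=1.0):
        self.msg_rate_limit = msg_rate_limit
        self.window = window
        self.expiries = []      # expiry timestamps, oldest first
        self.count = 0
        self.exceeded = False

    def allow(self):
        now = time.monotonic()
        while self.expiries and now >= self.expiries[0]:  # drop expired entries
            self.expiries.pop(0)
            self.count -= 1
        self.count += 1
        self.expiries.append(now + self.window)
        rate = self.count / self.window
        if rate > self.msg_rate_limit:
            self.exceeded = True
        elif self.exceeded and rate < 0.8 * self.msg_rate_limit:
            self.exceeded = False       # resume below 80% of the limit
        if self.exceeded:
            self.count -= 1             # suppressed: remove from the window
            self.expiries.pop()
            return False
        return True

limiter = SlidingWindowLimiter(msg_rate_limit=1000)
print(limiter.allow())  # True until the window exceeds 1000 msgs/sec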
示例#49
0
    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Implements `.HTTPConnection.write_headers`."""
        lines = []
        if self.is_client:
            self._request_start_line = start_line
            lines.append(
                utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
            # Client requests with a non-empty body must have either a
            # Content-Length or a Transfer-Encoding.
            self._chunking_output = (start_line.method
                                     in ('POST', 'PUT', 'PATCH')
                                     and 'Content-Length' not in headers
                                     and 'Transfer-Encoding' not in headers)
        else:
            self._response_start_line = start_line
            lines.append(
                utf8('HTTP/1.1 %s %s' % (start_line[1], start_line[2])))
            self._chunking_output = (
                # TODO: should this use
                # self._request_start_line.version or
                # start_line.version?
                self._request_start_line.version == 'HTTP/1.1' and
                # 304 responses have no body (not even a zero-length body), and so
                # should not have either Content-Length or Transfer-Encoding
                # headers.
                start_line.code != 304 and
                # No need to chunk the output if a Content-Length is specified.
                'Content-Length' not in headers and
                # Applications are discouraged from touching Transfer-Encoding,
                # but if they do, leave it alone.
                'Transfer-Encoding' not in headers)
            # If a 1.0 client asked for keep-alive, add the header.
            if (self._request_start_line.version == 'HTTP/1.0'
                    and (self._request_headers.get('Connection', '').lower()
                         == 'keep-alive')):
                headers['Connection'] = 'Keep-Alive'
        if self._chunking_output:
            headers['Transfer-Encoding'] = 'chunked'
        if (not self.is_client and (self._request_start_line.method == 'HEAD'
                                    or start_line.code == 304)):
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        lines.extend([utf8(n) + b": " + utf8(v) for n, v in headers.get_all()])
        for line in lines:
            if b'\n' in line:
                raise ValueError('Newline in header: ' + repr(line))
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            future.set_exception(iostream.StreamClosedError())
            future.exception()
        else:
            if callback is not None:
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            data = b"\r\n".join(lines) + b"\r\n\r\n"
            if chunk:
                data += self._format_chunk(chunk)
            self._pending_write = self.stream.write(data)
            self._pending_write.add_done_callback(self._on_write_complete)
        return future
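
The server branch of the _chunking_output decision above reduces to a pure predicate. A sketch of the same rule with plain arguments in place of instance state:

def should_chunk_response(request_version, status_code, headers):
    """Server side: chunk only HTTP/1.1, non-304 responses that carry
    neither a Content-Length nor an explicit Transfer-Encoding."""
    return (request_version == 'HTTP/1.1'
            and status_code != 304
            and 'Content-Length' not in headers
            and 'Transfer-Encoding' not in headers)

print(should_chunk_response('HTTP/1.1', 200, {}))                        # True
print(should_chunk_response('HTTP/1.1', 200, {'Content-Length': '42'}))  # False
print(should_chunk_response('HTTP/1.0', 200, {}))                        # False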
示例#51
0
    def resolve(self, *args, **kwargs):
        return Future()  # never completes
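
A Future that never resolves is a convenient fixture for exercising timeout paths, for example together with gen.with_timeout:

from datetime import timedelta
from tornado import gen
from tornado.concurrent import Future
from tornado.ioloop import IOLoop

@gen.coroutine
def main():
    try:
        yield gen.with_timeout(timedelta(seconds=0.1), Future())  # never completes
    except gen.TimeoutError:
        print("timed out, as expected")

IOLoop.current().run_sync(main)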
示例#52
0
class HTTP1Connection(httputil.HTTPConnection):
    """Implements the HTTP/1.x protocol.

    This class can be used on its own for clients, or via
    `HTTP1ServerConnection` for servers.
    """
    def __init__(self, stream, is_client, params=None, context=None):
        """
        :arg stream: an `.IOStream`
        :arg bool is_client: client or server
        :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
        :arg context: an opaque application-defined object that can be accessed
            as ``connection.context``.
        """
        self.is_client = is_client
        self.stream = stream
        if params is None:
            params = HTTP1ConnectionParameters()
        self.params = params
        self.context = context
        self.no_keep_alive = params.no_keep_alive
        # The body limits can be altered by the delegate, so save them
        # here instead of just referencing self.params later.
        self._max_body_size = (self.params.max_body_size
                               or self.stream.max_buffer_size)
        self._body_timeout = self.params.body_timeout
        # _write_finished is set to True when finish() has been called,
        # i.e. there will be no more data sent.  Data may still be in the
        # stream's write buffer.
        self._write_finished = False
        # True when we have read the entire incoming body.
        self._read_finished = False
        # _finish_future resolves when all data has been written and flushed
        # to the IOStream.
        self._finish_future = Future()
        # If true, the connection should be closed after this request
        # (after the response has been written in the server side,
        # and after it has been read in the client)
        self._disconnect_on_finish = False
        self._clear_callbacks()
        # Save the start lines after we read or write them; they
        # affect later processing (e.g. 304 responses and HEAD methods
        # have content-length but no bodies)
        self._request_start_line = None
        self._response_start_line = None
        self._request_headers = None
        # True if we are writing output with chunked encoding.
        self._chunking_output = None
        # While reading a body with a content-length, this is the
        # amount left to read.
        self._expected_content_remaining = None
        # A Future for our outgoing writes, returned by IOStream.write.
        self._pending_write = None

    def read_response(self, delegate):
        """Read a single HTTP response.

        Typical client-mode usage is to write a request using `write_headers`,
        `write`, and `finish`, and then call ``read_response``.

        :arg delegate: a `.HTTPMessageDelegate`

        Returns a `.Future` that resolves to None after the full response has
        been read.
        """
        if self.params.decompress:
            delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
        return self._read_message(delegate)

    @gen.coroutine
    def _read_message(self, delegate):
        need_delegate_close = False
        try:
            header_future = self.stream.read_until_regex(
                b"\r?\n\r?\n", max_bytes=self.params.max_header_size)
            if self.params.header_timeout is None:
                header_data = yield header_future
            else:
                try:
                    header_data = yield gen.with_timeout(
                        self.stream.io_loop.time() +
                        self.params.header_timeout,
                        header_future,
                        io_loop=self.stream.io_loop,
                        quiet_exceptions=iostream.StreamClosedError)
                except gen.TimeoutError:
                    self.close()
                    raise gen.Return(False)
            start_line, headers = self._parse_headers(header_data)
            if self.is_client:
                start_line = httputil.parse_response_start_line(start_line)
                self._response_start_line = start_line
            else:
                start_line = httputil.parse_request_start_line(start_line)
                self._request_start_line = start_line
                self._request_headers = headers

            self._disconnect_on_finish = not self._can_keep_alive(
                start_line, headers)
            need_delegate_close = True
            with _ExceptionLoggingContext(app_log):
                header_future = delegate.headers_received(start_line, headers)
                if header_future is not None:
                    yield header_future
            if self.stream is None:
                # We've been detached.
                need_delegate_close = False
                raise gen.Return(False)
            skip_body = False
            if self.is_client:
                if (self._request_start_line is not None
                        and self._request_start_line.method == 'HEAD'):
                    skip_body = True
                code = start_line.code
                if code == 304:
                    # 304 responses may include the content-length header
                    # but do not actually have a body.
                    # http://tools.ietf.org/html/rfc7230#section-3.3
                    skip_body = True
                if code >= 100 and code < 200:
                    # 1xx responses should never indicate the presence of
                    # a body.
                    if ('Content-Length' in headers
                            or 'Transfer-Encoding' in headers):
                        raise httputil.HTTPInputError(
                            "Response code %d cannot have body" % code)
                    # TODO: client delegates will get headers_received twice
                    # in the case of a 100-continue.  Document or change?
                    yield self._read_message(delegate)
            else:
                if (headers.get("Expect") == "100-continue"
                        and not self._write_finished):
                    self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
            if not skip_body:
                body_future = self._read_body(
                    start_line.code if self.is_client else 0, headers,
                    delegate)
                if body_future is not None:
                    if self._body_timeout is None:
                        yield body_future
                    else:
                        try:
                            yield gen.with_timeout(
                                self.stream.io_loop.time() +
                                self._body_timeout,
                                body_future,
                                self.stream.io_loop,
                                quiet_exceptions=iostream.StreamClosedError)
                        except gen.TimeoutError:
                            gen_log.info("Timeout reading body from %s",
                                         self.context)
                            self.stream.close()
                            raise gen.Return(False)
            self._read_finished = True
            if not self._write_finished or self.is_client:
                need_delegate_close = False
                with _ExceptionLoggingContext(app_log):
                    delegate.finish()
            # If we're waiting for the application to produce an asynchronous
            # response, and we're not detached, register a close callback
            # on the stream (we didn't need one while we were reading)
            if (not self._finish_future.done() and self.stream is not None
                    and not self.stream.closed()):
                self.stream.set_close_callback(self._on_connection_close)
                yield self._finish_future
            if self.is_client and self._disconnect_on_finish:
                self.close()
            if self.stream is None:
                raise gen.Return(False)
        except httputil.HTTPInputError as e:
            gen_log.info("Malformed HTTP message from %s: %s", self.context, e)
            self.close()
            raise gen.Return(False)
        finally:
            if need_delegate_close:
                with _ExceptionLoggingContext(app_log):
                    delegate.on_connection_close()
            self._clear_callbacks()
        raise gen.Return(True)

    def _clear_callbacks(self):
        """Clears the callback attributes.

        This allows the request handler to be garbage collected more
        quickly in CPython by breaking up reference cycles.
        """
        self._write_callback = None
        self._write_future = None
        self._close_callback = None
        if self.stream is not None:
            self.stream.set_close_callback(None)

    def set_close_callback(self, callback):
        """Sets a callback that will be run when the connection is closed.

        .. deprecated:: 4.0
            Use `.HTTPMessageDelegate.on_connection_close` instead.
        """
        self._close_callback = stack_context.wrap(callback)

    def _on_connection_close(self):
        # Note that this callback is only registered on the IOStream
        # when we have finished reading the request and are waiting for
        # the application to produce its response.
        if self._close_callback is not None:
            callback = self._close_callback
            self._close_callback = None
            callback()
        if not self._finish_future.done():
            self._finish_future.set_result(None)
        self._clear_callbacks()

    def close(self):
        if self.stream is not None:
            self.stream.close()
        self._clear_callbacks()
        if not self._finish_future.done():
            self._finish_future.set_result(None)

    def detach(self):
        """Take control of the underlying stream.

        Returns the underlying `.IOStream` object and stops all further
        HTTP processing.  May only be called during
        `.HTTPMessageDelegate.headers_received`.  Intended for implementing
        protocols like websockets that tunnel over an HTTP handshake.
        """
        self._clear_callbacks()
        stream = self.stream
        self.stream = None
        if not self._finish_future.done():
            self._finish_future.set_result(None)
        return stream

    def set_body_timeout(self, timeout):
        """Sets the body timeout for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._body_timeout = timeout

    def set_max_body_size(self, max_body_size):
        """Sets the body size limit for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._max_body_size = max_body_size

    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Implements `.HTTPConnection.write_headers`."""
        lines = []
        if self.is_client:
            self._request_start_line = start_line
            lines.append(
                utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
            # Client requests with a non-empty body must have either a
            # Content-Length or a Transfer-Encoding.
            self._chunking_output = (start_line.method
                                     in ('POST', 'PUT', 'PATCH')
                                     and 'Content-Length' not in headers
                                     and 'Transfer-Encoding' not in headers)
        else:
            self._response_start_line = start_line
            lines.append(
                utf8('HTTP/1.1 %s %s' % (start_line[1], start_line[2])))
            self._chunking_output = (
                # TODO: should this use
                # self._request_start_line.version or
                # start_line.version?
                self._request_start_line.version == 'HTTP/1.1' and
                # 304 responses have no body (not even a zero-length body), and so
                # should not have either Content-Length or Transfer-Encoding
                # headers.
                start_line.code != 304 and
                # No need to chunk the output if a Content-Length is specified.
                'Content-Length' not in headers and
                # Applications are discouraged from touching Transfer-Encoding,
                # but if they do, leave it alone.
                'Transfer-Encoding' not in headers)
            # If a 1.0 client asked for keep-alive, add the header.
            if (self._request_start_line.version == 'HTTP/1.0'
                    and (self._request_headers.get('Connection', '').lower()
                         == 'keep-alive')):
                headers['Connection'] = 'Keep-Alive'
        if self._chunking_output:
            headers['Transfer-Encoding'] = 'chunked'
        if (not self.is_client and (self._request_start_line.method == 'HEAD'
                                    or start_line.code == 304)):
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        lines.extend([utf8(n) + b": " + utf8(v) for n, v in headers.get_all()])
        for line in lines:
            if b'\n' in line:
                raise ValueError('Newline in header: ' + repr(line))
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            future.set_exception(iostream.StreamClosedError())
            future.exception()
        else:
            if callback is not None:
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            data = b"\r\n".join(lines) + b"\r\n\r\n"
            if chunk:
                data += self._format_chunk(chunk)
            self._pending_write = self.stream.write(data)
            self._pending_write.add_done_callback(self._on_write_complete)
        return future

    def _format_chunk(self, chunk):
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                # Close the stream now to stop further framing errors.
                self.stream.close()
                raise httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
        if self._chunking_output and chunk:
            # Don't write out empty chunks because that means END-OF-STREAM
            # with chunked encoding
            return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
        else:
            return chunk

    def write(self, chunk, callback=None):
        """Implements `.HTTPConnection.write`.

        For backwards compatibility it is allowed but deprecated to
        skip `write_headers` and instead call `write()` with a
        pre-encoded header block.
        """
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            self._write_future.set_exception(iostream.StreamClosedError())
            self._write_future.exception()
        else:
            if callback is not None:
                self._write_callback = stack_context.wrap(callback)
            else:
                future = self._write_future = Future()
            self._pending_write = self.stream.write(self._format_chunk(chunk))
            self._pending_write.add_done_callback(self._on_write_complete)
        return future

    def finish(self):
        """Implements `.HTTPConnection.finish`."""
        if (self._expected_content_remaining is not None
                and self._expected_content_remaining != 0
                and not self.stream.closed()):
            self.stream.close()
            raise httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
        if self._chunking_output:
            if not self.stream.closed():
                self._pending_write = self.stream.write(b"0\r\n\r\n")
                self._pending_write.add_done_callback(self._on_write_complete)
        self._write_finished = True
        # If the app finished the request while we're still reading,
        # divert any remaining data away from the delegate and
        # close the connection when we're done sending our response.
        # Closing the connection is the only way to avoid reading the
        # whole input body.
        if not self._read_finished:
            self._disconnect_on_finish = True
        # No more data is coming, so instruct TCP to send any remaining
        # data immediately instead of waiting for a full packet or ack.
        self.stream.set_nodelay(True)
        if self._pending_write is None:
            self._finish_request(None)
        else:
            self._pending_write.add_done_callback(self._finish_request)

    def _on_write_complete(self, future):
        exc = future.exception()
        if exc is not None and not isinstance(exc, iostream.StreamClosedError):
            future.result()
        if self._write_callback is not None:
            callback = self._write_callback
            self._write_callback = None
            self.stream.io_loop.add_callback(callback)
        if self._write_future is not None:
            future = self._write_future
            self._write_future = None
            future.set_result(None)

    def _can_keep_alive(self, start_line, headers):
        if self.params.no_keep_alive:
            return False
        connection_header = headers.get("Connection")
        if connection_header is not None:
            connection_header = connection_header.lower()
        if start_line.version == "HTTP/1.1":
            return connection_header != "close"
        elif ("Content-Length" in headers
              or headers.get("Transfer-Encoding", "").lower() == "chunked"
              or start_line.method in ("HEAD", "GET")):
            return connection_header == "keep-alive"
        return False

    def _finish_request(self, future):
        self._clear_callbacks()
        if not self.is_client and self._disconnect_on_finish:
            self.close()
            return
        # Turn Nagle's algorithm back on, leaving the stream in its
        # default state for the next request.
        self.stream.set_nodelay(False)
        if not self._finish_future.done():
            self._finish_future.set_result(None)

    def _parse_headers(self, data):
        # The lstrip removes newlines that some implementations sometimes
        # insert between messages of a reused connection.  Per RFC 7230,
        # we SHOULD ignore at least one empty line before the request.
        # http://tools.ietf.org/html/rfc7230#section-3.5
        data = native_str(data.decode('latin1')).lstrip("\r\n")
        # RFC 7230 section 3.5 allows for both CRLF and bare LF.
        eol = data.find("\n")
        start_line = data[:eol].rstrip("\r")
        try:
            headers = httputil.HTTPHeaders.parse(data[eol:])
        except ValueError:
            # probably from split() if there was no ':' in the line
            raise httputil.HTTPInputError("Malformed HTTP headers: %r" %
                                          data[eol:100])
        return start_line, headers

    def _read_body(self, code, headers, delegate):
        if "Content-Length" in headers:
            if "," in headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise httputil.HTTPInputError(
                        "Multiple unequal Content-Lengths: %r" %
                        headers["Content-Length"])
                headers["Content-Length"] = pieces[0]
            content_length = int(headers["Content-Length"])

            if content_length > self._max_body_size:
                raise httputil.HTTPInputError("Content-Length too long")
        else:
            content_length = None

        if code == 204:
            # This response code is not allowed to have a non-empty body,
            # and has an implicit length of zero instead of read-until-close.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in headers
                    or content_length not in (None, 0)):
                raise httputil.HTTPInputError(
                    "Response with code %d should not have body" % code)
            content_length = 0

        if content_length is not None:
            return self._read_fixed_body(content_length, delegate)
        if headers.get("Transfer-Encoding") == "chunked":
            return self._read_chunked_body(delegate)
        if self.is_client:
            return self._read_body_until_close(delegate)
        return None

    @gen.coroutine
    def _read_fixed_body(self, content_length, delegate):
        while content_length > 0:
            body = yield self.stream.read_bytes(min(self.params.chunk_size,
                                                    content_length),
                                                partial=True)
            content_length -= len(body)
            if not self._write_finished or self.is_client:
                with _ExceptionLoggingContext(app_log):
                    ret = delegate.data_received(body)
                    if ret is not None:
                        yield ret

    @gen.coroutine
    def _read_chunked_body(self, delegate):
        # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
        total_size = 0
        while True:
            chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
            chunk_len = int(chunk_len.strip(), 16)
            if chunk_len == 0:
                return
            total_size += chunk_len
            if total_size > self._max_body_size:
                raise httputil.HTTPInputError("chunked body too large")
            bytes_to_read = chunk_len
            while bytes_to_read:
                chunk = yield self.stream.read_bytes(min(
                    bytes_to_read, self.params.chunk_size),
                                                     partial=True)
                bytes_to_read -= len(chunk)
                if not self._write_finished or self.is_client:
                    with _ExceptionLoggingContext(app_log):
                        ret = delegate.data_received(chunk)
                        if ret is not None:
                            yield ret
            # chunk ends with \r\n
            crlf = yield self.stream.read_bytes(2)
            assert crlf == b"\r\n"

    @gen.coroutine
    def _read_body_until_close(self, delegate):
        body = yield self.stream.read_until_close()
        if not self._write_finished or self.is_client:
            with _ExceptionLoggingContext(app_log):
                delegate.data_received(body)
Example #53
    def write_headers(
        self,
        start_line: Union[httputil.RequestStartLine,
                          httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
        chunk: Optional[bytes] = None,
    ) -> "Future[None]":
        """Implements `.HTTPConnection.write_headers`."""
        lines = []
        if self.is_client:
            assert isinstance(start_line, httputil.RequestStartLine)
            self._request_start_line = start_line
            lines.append(
                utf8("%s %s HTTP/1.1" % (start_line[0], start_line[1])))
            # Client requests with a non-empty body must have either a
            # Content-Length or a Transfer-Encoding.
            self._chunking_output = (
                start_line.method in ("POST", "PUT", "PATCH")
                and "Content-Length" not in headers
                and ("Transfer-Encoding" not in headers
                     or headers["Transfer-Encoding"] == "chunked"))
        else:
            assert isinstance(start_line, httputil.ResponseStartLine)
            assert self._request_start_line is not None
            assert self._request_headers is not None
            self._response_start_line = start_line
            lines.append(
                utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2])))
            self._chunking_output = (
                # TODO: should this use
                # self._request_start_line.version or
                # start_line.version?
                self._request_start_line.version == "HTTP/1.1"
                # Omit payload header field for HEAD request.
                and self._request_start_line.method != "HEAD"
                # 1xx, 204 and 304 responses have no body (not even a zero-length
                # body), and so should not have either Content-Length or
                # Transfer-Encoding headers.
                and start_line.code not in (204, 304)
                and (start_line.code < 100 or start_line.code >= 200)
                # No need to chunk the output if a Content-Length is specified.
                and "Content-Length" not in headers
                # Applications are discouraged from touching Transfer-Encoding,
                # but if they do, leave it alone.
                and "Transfer-Encoding" not in headers)
            # If connection to a 1.1 client will be closed, inform client
            if (self._request_start_line.version == "HTTP/1.1"
                    and self._disconnect_on_finish):
                headers["Connection"] = "close"
            # If a 1.0 client asked for keep-alive, add the header.
            if (self._request_start_line.version == "HTTP/1.0"
                    and self._request_headers.get("Connection",
                                                  "").lower() == "keep-alive"):
                headers["Connection"] = "Keep-Alive"
        if self._chunking_output:
            headers["Transfer-Encoding"] = "chunked"
        if not self.is_client and (self._request_start_line.method == "HEAD"
                                   or cast(httputil.ResponseStartLine,
                                           start_line).code == 304):
            self._expected_content_remaining = 0
        elif "Content-Length" in headers:
            self._expected_content_remaining = int(headers["Content-Length"])
        else:
            self._expected_content_remaining = None
        # TODO: headers are supposed to be of type str, but we still have some
        # cases that let bytes slip through. Remove these native_str calls when those
        # are fixed.
        header_lines = (native_str(n) + ": " + native_str(v)
                        for n, v in headers.get_all())
        lines.extend(line.encode("latin1") for line in header_lines)
        for line in lines:
            if b"\n" in line:
                raise ValueError("Newline in header: " + repr(line))
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            future.set_exception(iostream.StreamClosedError())
            future.exception()
        else:
            future = self._write_future = Future()
            data = b"\r\n".join(lines) + b"\r\n\r\n"
            if chunk:
                data += self._format_chunk(chunk)
            self._pending_write = self.stream.write(data)
            future_add_done_callback(self._pending_write,
                                     self._on_write_complete)
        return future
Example #54
    @gen.coroutine
    def get(self, *args, **kwargs):
        global future
        future = Future()
        future.add_done_callback(self.done)

        yield future
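
The `get` handler above parks on a module-level Future that some other callback resolves later. A minimal sketch of that pattern, with illustrative names that are not from the original project:

from tornado import gen, ioloop
from tornado.concurrent import Future

@gen.coroutine
def wait_for_event(pending):
    # Suspends until pending.set_result() is called elsewhere.
    result = yield pending
    raise gen.Return(result)

def main():
    loop = ioloop.IOLoop.current()
    pending = Future()
    loop.add_callback(lambda: pending.set_result('done'))  # fires once the loop runs
    print(loop.run_sync(lambda: wait_for_event(pending)))  # -> 'done'

main()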
Example #55
    def _request_helper(self, options_func, cancellation_event):
        if cancellation_event is not None:
            assert isinstance(cancellation_event, Event)

        # validate_params()
        options = options_func()

        create_response = options.create_response
        create_status_response = options.create_status

        params_to_merge_in = {}

        if options.operation_type == PNOperationType.PNPublishOperation:
            params_to_merge_in[
                'seqn'] = self._publish_sequence_manager.get_next_sequence()

        options.merge_params_in(params_to_merge_in)

        future = Future()

        url = utils.build_url(self.config.scheme(), self.base_origin,
                              options.path, options.query_string)

        logger.debug("%s %s %s" % (options.method_string, url, options.data))

        start_timestamp = time.time()

        request = tornado.httpclient.HTTPRequest(
            url=url,
            method=options.method_string,
            headers=self.headers,
            body=options.data if options.data is not None else None,
            connect_timeout=options.connect_timeout,
            request_timeout=options.request_timeout)

        def response_callback(response):
            if cancellation_event is not None and cancellation_event.is_set():
                return

            body = response.body
            response_info = None
            status_category = PNStatusCategory.PNUnknownCategory

            if response is not None:
                request_url = six.moves.urllib.parse.urlparse(
                    response.effective_url)
                query = six.moves.urllib.parse.parse_qs(request_url.query)
                uuid = None
                auth_key = None

                if 'uuid' in query and len(query['uuid']) > 0:
                    uuid = query['uuid'][0]

                if 'auth_key' in query and len(query['auth_key']) > 0:
                    auth_key = query['auth_key'][0]

                response_info = ResponseInfo(
                    status_code=response.code,
                    tls_enabled='https' == request_url.scheme,
                    origin=request_url.hostname,
                    uuid=uuid,
                    auth_key=auth_key,
                    client_request=response.request)

            if body is not None and len(body) > 0:
                try:
                    data = json.loads(body)
                except (ValueError, TypeError):
                    try:
                        data = json.loads(body.decode("utf-8"))
                    except ValueError:
                        tornado_result = PubNubTornadoException(
                            create_response(None),
                            create_status_response(
                                status_category, response, response_info,
                                PubNubException(
                                    pn_error=PNERR_JSON_DECODING_FAILED,
                                    errormsg='json decode error')))
                        future.set_exception(tornado_result)
                        return
            else:
                data = "N/A"

            logger.debug(data)

            if response.error is not None:
                if response.code >= 500:
                    err = PNERR_SERVER_ERROR
                    data = str(response.error)
                else:
                    err = PNERR_CLIENT_ERROR

                e = PubNubException(
                    errormsg=data,
                    pn_error=err,
                    status_code=response.code,
                )

                if response.code == 403:
                    status_category = PNStatusCategory.PNAccessDeniedCategory

                if response.code == 400:
                    status_category = PNStatusCategory.PNBadRequestCategory

                if response.code == 599:
                    if 'HTTP 599: Timeout during request' == data:
                        status_category = PNStatusCategory.PNTimeoutCategory
                        e = PubNubException(pn_error=PNERR_CLIENT_TIMEOUT,
                                            errormsg=str(e))
                    elif 'HTTP 599: Stream closed' == data or\
                            'Name or service not known' in data or\
                            'Temporary failure in name resolution' in data:
                        status_category = PNStatusCategory.PNNetworkIssuesCategory
                        e = PubNubException(pn_error=PNERR_CONNECTION_ERROR,
                                            errormsg=str(e))
                        # TODO: add check for other status codes

                future.set_exception(
                    PubNubTornadoException(result=data,
                                           status=create_status_response(
                                               status_category, data,
                                               response_info, e)))
            else:
                self._telemetry_manager.store_latency(
                    time.time() - start_timestamp, options.operation_type)

                future.set_result(
                    TornadoEnvelope(
                        result=create_response(data),
                        status=create_status_response(
                            PNStatusCategory.PNAcknowledgmentCategory, data,
                            response_info, None)))

        self.http.fetch(request=request, callback=response_callback)

        return future
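
The core trick in `_request_helper` is adapting a callback-style fetch into a Future the caller can yield. A stripped-down sketch of just that bridging, assuming a Tornado version (<6) whose `fetch` still accepts a callback, as the code above does; `fetch_as_future` is an illustrative name, not part of the SDK:

from tornado.concurrent import Future
from tornado.httpclient import AsyncHTTPClient

def fetch_as_future(url):
    future = Future()

    def callback(response):
        # Propagate failure or success through the Future.
        if response.error is not None:
            future.set_exception(response.error)
        else:
            future.set_result(response.body)

    AsyncHTTPClient().fetch(url, callback=callback)
    return future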
Example #56
def multi_future(children, quiet_exceptions=()):
    """Wait for multiple asynchronous futures in parallel.

    Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
    a new Future that resolves when all the other Futures are done.
    If all the ``Futures`` succeeded, the returned Future's result is a list
    of their results.  If any failed, the returned Future raises the exception
    of the first one to fail.

    Instead of a list, the argument may also be a dictionary whose values are
    Futures, in which case a parallel dictionary is returned mapping the same
    keys to their results.

    It is not normally necessary to call `multi_future` explicitly,
    since the engine will do so automatically when the generator
    yields a list of ``Futures``. However, calling it directly
    allows you to use the ``quiet_exceptions`` argument to control
    the logging of multiple exceptions.

    This function is faster than the `Multi` `YieldPoint` because it
    does not require the creation of a stack context.

    .. versionadded:: 4.0

    .. versionchanged:: 4.2
       If multiple ``Futures`` fail, any exceptions after the first (which is
       raised) will be logged. Added the ``quiet_exceptions``
       argument to suppress this logging for selected exception types.
    """
    if isinstance(children, dict):
        keys = list(children.keys())
        children = children.values()
    else:
        keys = None
    assert all(is_future(i) for i in children)
    unfinished_children = set(children)

    future = Future()
    if not children:
        future.set_result({} if keys is not None else [])

    def callback(f):
        unfinished_children.remove(f)
        if not unfinished_children:
            result_list = []
            for f in children:
                try:
                    result_list.append(f.result())
                except Exception as e:
                    if future.done():
                        if not isinstance(e, quiet_exceptions):
                            app_log.error("Multiple exceptions in yield list",
                                          exc_info=True)
                    else:
                        future.set_exc_info(sys.exc_info())
            if not future.done():
                if keys is not None:
                    future.set_result(dict(zip(keys, result_list)))
                else:
                    future.set_result(result_list)

    listening = set()
    for f in children:
        if f not in listening:
            listening.add(f)
            f.add_done_callback(callback)
    return future
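
Per the docstring, passing a dict yields back a dict keyed the same way. A short usage sketch (the URLs and the surrounding coroutine are illustrative):

from tornado import gen
from tornado.httpclient import AsyncHTTPClient

@gen.coroutine
def fetch_both():
    client = AsyncHTTPClient()
    responses = yield multi_future({
        'python': client.fetch('http://python.org'),
        'tornado': client.fetch('http://tornadoweb.org'),
    }, quiet_exceptions=(IOError,))  # don't log secondary IOErrors
    raise gen.Return(responses)  # dict of HTTPResponse objects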
Example #57
def list_media(camera_config: dict, media_type: str, prefix=None) -> typing.Awaitable:
    fut = Future()
    target_dir = camera_config.get('target_dir')

    if media_type == 'picture':
        exts = _PICTURE_EXTS

    elif media_type == 'movie':
        exts = _MOVIE_EXTS

    # create a subprocess to retrieve media files
    def do_list_media(pipe):
        import mimetypes
        parent_pipe.close()

        mf = _list_media_files(target_dir, exts=exts, prefix=prefix)
        for (p, st) in mf:
            path = p[len(target_dir):]
            if not path.startswith('/'):
                path = '/' + path

            timestamp = st.st_mtime
            size = st.st_size

            mime_type = mimetypes.guess_type(path)[0]
            pipe.send({
                'path': path,
                'mimeType': mime_type if mime_type is not None else 'video/mpeg',
                'momentStr': pretty_date_time(datetime.datetime.fromtimestamp(timestamp)),
                'momentStrShort': pretty_date_time(datetime.datetime.fromtimestamp(timestamp), short=True),
                'sizeStr': utils.pretty_size(size),
                'timestamp': timestamp
            })

        pipe.close()

    logging.debug('starting media listing process...')

    (parent_pipe, child_pipe) = multiprocessing.Pipe(duplex=False)
    process = multiprocessing.Process(target=do_list_media, args=(child_pipe,))
    process.start()
    child_pipe.close()

    # poll the subprocess to see when it has finished
    started = datetime.datetime.now()
    media_list = []

    def read_media_list():
        while parent_pipe.poll():
            try:
                media_list.append(parent_pipe.recv())

            except EOFError:
                break

    def poll_process():
        io_loop = IOLoop.instance()
        if process.is_alive():  # not finished yet
            now = datetime.datetime.now()
            delta = now - started
            if delta.seconds < settings.LIST_MEDIA_TIMEOUT:
                io_loop.add_timeout(datetime.timedelta(seconds=0.5), poll_process)
                read_media_list()

            else:  # process did not finish in time
                logging.error('timeout waiting for the media listing process to finish')
                try:
                    os.kill(process.pid, signal.SIGTERM)

                except:
                    pass  # nevermind

                fut.set_result(None)

        else:  # finished
            read_media_list()
            logging.debug('media listing process has returned %(count)s files' % {'count': len(media_list)})
            fut.set_result(media_list)

    poll_process()
    return fut
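
Because `list_media` returns a Future, a Tornado handler can simply await it. A hedged usage sketch; the `config.get_camera` helper and the handler wiring are assumptions, not shown in the original:

from tornado.web import RequestHandler, HTTPError

class MediaListHandler(RequestHandler):
    async def get(self, camera_id):
        camera_config = config.get_camera(int(camera_id))  # assumed helper
        media_list = await list_media(camera_config, media_type='picture')
        if media_list is None:  # the subprocess timed out
            raise HTTPError(408, 'timed out listing media')
        self.finish({'mediaList': media_list})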
Example #58
class HTTP1Connection(httputil.HTTPConnection):
    """Implements the HTTP/1.x protocol.

    This class can be used on its own for clients, or via `HTTP1ServerConnection`
    for servers.
    """
    def __init__(
        self,
        stream: iostream.IOStream,
        is_client: bool,
        params: Optional[HTTP1ConnectionParameters] = None,
        context: Optional[object] = None,
    ) -> None:
        """
        :arg stream: an `.IOStream`
        :arg bool is_client: client or server
        :arg params: a `.HTTP1ConnectionParameters` instance or ``None``
        :arg context: an opaque application-defined object that can be accessed
            as ``connection.context``.
        """
        self.is_client = is_client
        self.stream = stream
        if params is None:
            params = HTTP1ConnectionParameters()
        self.params = params
        self.context = context
        self.no_keep_alive = params.no_keep_alive
        # The body limits can be altered by the delegate, so save them
        # here instead of just referencing self.params later.
        self._max_body_size = self.params.max_body_size or self.stream.max_buffer_size
        self._body_timeout = self.params.body_timeout
        # _write_finished is set to True when finish() has been called,
        # i.e. there will be no more data sent.  Data may still be in the
        # stream's write buffer.
        self._write_finished = False
        # True when we have read the entire incoming body.
        self._read_finished = False
        # _finish_future resolves when all data has been written and flushed
        # to the IOStream.
        self._finish_future = Future()  # type: Future[None]
        # If true, the connection should be closed after this request
        # (after the response has been written in the server side,
        # and after it has been read in the client)
        self._disconnect_on_finish = False
        self._clear_callbacks()
        # Save the start lines after we read or write them; they
        # affect later processing (e.g. 304 responses and HEAD methods
        # have content-length but no bodies)
        self._request_start_line = None  # type: Optional[httputil.RequestStartLine]
        self._response_start_line = None  # type: Optional[httputil.ResponseStartLine]
        self._request_headers = None  # type: Optional[httputil.HTTPHeaders]
        # True if we are writing output with chunked encoding.
        self._chunking_output = False
        # While reading a body with a content-length, this is the
        # amount left to read.
        self._expected_content_remaining = None  # type: Optional[int]
        # A Future for our outgoing writes, returned by IOStream.write.
        self._pending_write = None  # type: Optional[Future[None]]

    def read_response(
            self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
        """Read a single HTTP response.

        Typical client-mode usage is to write a request using `write_headers`,
        `write`, and `finish`, and then call ``read_response``.

        :arg delegate: a `.HTTPMessageDelegate`

        Returns a `.Future` that resolves to a bool after the full response has
        been read. The result is true if the stream is still open.
        """
        if self.params.decompress:
            delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
        return self._read_message(delegate)

    async def _read_message(self,
                            delegate: httputil.HTTPMessageDelegate) -> bool:
        need_delegate_close = False
        try:
            header_future = self.stream.read_until_regex(
                b"\r?\n\r?\n", max_bytes=self.params.max_header_size)
            if self.params.header_timeout is None:
                header_data = await header_future
            else:
                try:
                    header_data = await gen.with_timeout(
                        self.stream.io_loop.time() +
                        self.params.header_timeout,
                        header_future,
                        quiet_exceptions=iostream.StreamClosedError,
                    )
                except gen.TimeoutError:
                    self.close()
                    return False
            start_line_str, headers = self._parse_headers(header_data)
            if self.is_client:
                resp_start_line = httputil.parse_response_start_line(
                    start_line_str)
                self._response_start_line = resp_start_line
                start_line = (
                    resp_start_line
                )  # type: Union[httputil.RequestStartLine, httputil.ResponseStartLine]
                # TODO: this will need to change to support client-side keepalive
                self._disconnect_on_finish = False
            else:
                req_start_line = httputil.parse_request_start_line(
                    start_line_str)
                self._request_start_line = req_start_line
                self._request_headers = headers
                start_line = req_start_line
                self._disconnect_on_finish = not self._can_keep_alive(
                    req_start_line, headers)
            need_delegate_close = True
            with _ExceptionLoggingContext(app_log):
                header_recv_future = delegate.headers_received(
                    start_line, headers)
                if header_recv_future is not None:
                    await header_recv_future
            if self.stream is None:
                # We've been detached.
                need_delegate_close = False
                return False
            skip_body = False
            if self.is_client:
                assert isinstance(start_line, httputil.ResponseStartLine)
                if (self._request_start_line is not None
                        and self._request_start_line.method == "HEAD"):
                    skip_body = True
                code = start_line.code
                if code == 304:
                    # 304 responses may include the content-length header
                    # but do not actually have a body.
                    # http://tools.ietf.org/html/rfc7230#section-3.3
                    skip_body = True
                if 100 <= code < 200:
                    # 1xx responses should never indicate the presence of
                    # a body.
                    if "Content-Length" in headers or "Transfer-Encoding" in headers:
                        raise httputil.HTTPInputError(
                            "Response code %d cannot have body" % code)
                    # TODO: client delegates will get headers_received twice
                    # in the case of a 100-continue.  Document or change?
                    await self._read_message(delegate)
            else:
                if headers.get(
                        "Expect"
                ) == "100-continue" and not self._write_finished:
                    self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n")
            if not skip_body:
                body_future = self._read_body(
                    resp_start_line.code if self.is_client else 0, headers,
                    delegate)
                if body_future is not None:
                    if self._body_timeout is None:
                        await body_future
                    else:
                        try:
                            await gen.with_timeout(
                                self.stream.io_loop.time() +
                                self._body_timeout,
                                body_future,
                                quiet_exceptions=iostream.StreamClosedError,
                            )
                        except gen.TimeoutError:
                            gen_log.info("Timeout reading body from %s",
                                         self.context)
                            self.stream.close()
                            return False
            self._read_finished = True
            if not self._write_finished or self.is_client:
                need_delegate_close = False
                with _ExceptionLoggingContext(app_log):
                    delegate.finish()
            # If we're waiting for the application to produce an asynchronous
            # response, and we're not detached, register a close callback
            # on the stream (we didn't need one while we were reading)
            if (not self._finish_future.done() and self.stream is not None
                    and not self.stream.closed()):
                self.stream.set_close_callback(self._on_connection_close)
                await self._finish_future
            if self.is_client and self._disconnect_on_finish:
                self.close()
            if self.stream is None:
                return False
        except httputil.HTTPInputError as e:
            gen_log.info("Malformed HTTP message from %s: %s", self.context, e)
            if not self.is_client:
                await self.stream.write(b"HTTP/1.1 400 Bad Request\r\n\r\n")
            self.close()
            return False
        finally:
            if need_delegate_close:
                with _ExceptionLoggingContext(app_log):
                    delegate.on_connection_close()
            header_future = None  # type: ignore
            self._clear_callbacks()
        return True

    def _clear_callbacks(self) -> None:
        """Clears the callback attributes.

        This allows the request handler to be garbage collected more
        quickly in CPython by breaking up reference cycles.
        """
        self._write_callback = None
        self._write_future = None  # type: Optional[Future[None]]
        self._close_callback = None  # type: Optional[Callable[[], None]]
        if self.stream is not None:
            self.stream.set_close_callback(None)

    def set_close_callback(self, callback: Optional[Callable[[],
                                                             None]]) -> None:
        """Sets a callback that will be run when the connection is closed.

        Note that this callback is slightly different from
        `.HTTPMessageDelegate.on_connection_close`: The
        `.HTTPMessageDelegate` method is called when the connection is
        closed while receiving a message. This callback is used when
        there is not an active delegate (for example, on the server
        side this callback is used if the client closes the connection
        after sending its request but before receiving all the
        response).
        """
        self._close_callback = callback

    def _on_connection_close(self) -> None:
        # Note that this callback is only registered on the IOStream
        # when we have finished reading the request and are waiting for
        # the application to produce its response.
        if self._close_callback is not None:
            callback = self._close_callback
            self._close_callback = None
            callback()
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)
        self._clear_callbacks()

    def close(self) -> None:
        if self.stream is not None:
            self.stream.close()
        self._clear_callbacks()
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)

    def detach(self) -> iostream.IOStream:
        """Take control of the underlying stream.

        Returns the underlying `.IOStream` object and stops all further
        HTTP processing.  May only be called during
        `.HTTPMessageDelegate.headers_received`.  Intended for implementing
        protocols like websockets that tunnel over an HTTP handshake.
        """
        self._clear_callbacks()
        stream = self.stream
        self.stream = None  # type: ignore
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)
        return stream

    def set_body_timeout(self, timeout: float) -> None:
        """Sets the body timeout for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._body_timeout = timeout

    def set_max_body_size(self, max_body_size: int) -> None:
        """Sets the body size limit for a single request.

        Overrides the value from `.HTTP1ConnectionParameters`.
        """
        self._max_body_size = max_body_size

    def write_headers(
        self,
        start_line: Union[httputil.RequestStartLine,
                          httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
        chunk: Optional[bytes] = None,
    ) -> "Future[None]":
        """Implements `.HTTPConnection.write_headers`."""
        lines = []
        if self.is_client:
            assert isinstance(start_line, httputil.RequestStartLine)
            self._request_start_line = start_line
            lines.append(
                utf8("%s %s HTTP/1.1" % (start_line[0], start_line[1])))
            # Client requests with a non-empty body must have either a
            # Content-Length or a Transfer-Encoding.
            self._chunking_output = (
                start_line.method in ("POST", "PUT", "PATCH")
                and "Content-Length" not in headers
                and ("Transfer-Encoding" not in headers
                     or headers["Transfer-Encoding"] == "chunked"))
        else:
            assert isinstance(start_line, httputil.ResponseStartLine)
            assert self._request_start_line is not None
            assert self._request_headers is not None
            self._response_start_line = start_line
            lines.append(
                utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2])))
            self._chunking_output = (
                # TODO: should this use
                # self._request_start_line.version or
                # start_line.version?
                self._request_start_line.version == "HTTP/1.1"
                # Omit payload header field for HEAD request.
                and self._request_start_line.method != "HEAD"
                # 1xx, 204 and 304 responses have no body (not even a zero-length
                # body), and so should not have either Content-Length or
                # Transfer-Encoding headers.
                and start_line.code not in (204, 304)
                and (start_line.code < 100 or start_line.code >= 200)
                # No need to chunk the output if a Content-Length is specified.
                and "Content-Length" not in headers
                # Applications are discouraged from touching Transfer-Encoding,
                # but if they do, leave it alone.
                and "Transfer-Encoding" not in headers)
            # If connection to a 1.1 client will be closed, inform client
            if (self._request_start_line.version == "HTTP/1.1"
                    and self._disconnect_on_finish):
                headers["Connection"] = "close"
            # If a 1.0 client asked for keep-alive, add the header.
            if (self._request_start_line.version == "HTTP/1.0"
                    and self._request_headers.get("Connection",
                                                  "").lower() == "keep-alive"):
                headers["Connection"] = "Keep-Alive"
        if self._chunking_output:
            headers["Transfer-Encoding"] = "chunked"
        if not self.is_client and (self._request_start_line.method == "HEAD"
                                   or cast(httputil.ResponseStartLine,
                                           start_line).code == 304):
            self._expected_content_remaining = 0
        elif "Content-Length" in headers:
            self._expected_content_remaining = int(headers["Content-Length"])
        else:
            self._expected_content_remaining = None
        # TODO: headers are supposed to be of type str, but we still have some
        # cases that let bytes slip through. Remove these native_str calls when those
        # are fixed.
        header_lines = (native_str(n) + ": " + native_str(v)
                        for n, v in headers.get_all())
        lines.extend(line.encode("latin1") for line in header_lines)
        for line in lines:
            if b"\n" in line:
                raise ValueError("Newline in header: " + repr(line))
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            future.set_exception(iostream.StreamClosedError())
            future.exception()
        else:
            future = self._write_future = Future()
            data = b"\r\n".join(lines) + b"\r\n\r\n"
            if chunk:
                data += self._format_chunk(chunk)
            self._pending_write = self.stream.write(data)
            future_add_done_callback(self._pending_write,
                                     self._on_write_complete)
        return future

    def _format_chunk(self, chunk: bytes) -> bytes:
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                # Close the stream now to stop further framing errors.
                self.stream.close()
                raise httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
        if self._chunking_output and chunk:
            # Don't write out empty chunks because that means END-OF-STREAM
            # with chunked encoding
            return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
        else:
            return chunk

    def write(self, chunk: bytes) -> "Future[None]":
        """Implements `.HTTPConnection.write`.

        For backwards compatibility it is allowed but deprecated to
        skip `write_headers` and instead call `write()` with a
        pre-encoded header block.
        """
        future = None
        if self.stream.closed():
            future = self._write_future = Future()
            self._write_future.set_exception(iostream.StreamClosedError())
            self._write_future.exception()
        else:
            future = self._write_future = Future()
            self._pending_write = self.stream.write(self._format_chunk(chunk))
            future_add_done_callback(self._pending_write,
                                     self._on_write_complete)
        return future

    def finish(self) -> None:
        """Implements `.HTTPConnection.finish`."""
        if (self._expected_content_remaining is not None
                and self._expected_content_remaining != 0
                and not self.stream.closed()):
            self.stream.close()
            raise httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
        if self._chunking_output:
            if not self.stream.closed():
                self._pending_write = self.stream.write(b"0\r\n\r\n")
                self._pending_write.add_done_callback(self._on_write_complete)
        self._write_finished = True
        # If the app finished the request while we're still reading,
        # divert any remaining data away from the delegate and
        # close the connection when we're done sending our response.
        # Closing the connection is the only way to avoid reading the
        # whole input body.
        if not self._read_finished:
            self._disconnect_on_finish = True
        # No more data is coming, so instruct TCP to send any remaining
        # data immediately instead of waiting for a full packet or ack.
        self.stream.set_nodelay(True)
        if self._pending_write is None:
            self._finish_request(None)
        else:
            future_add_done_callback(self._pending_write, self._finish_request)

    def _on_write_complete(self, future: "Future[None]") -> None:
        exc = future.exception()
        if exc is not None and not isinstance(exc, iostream.StreamClosedError):
            future.result()
        if self._write_callback is not None:
            callback = self._write_callback
            self._write_callback = None
            self.stream.io_loop.add_callback(callback)
        if self._write_future is not None:
            future = self._write_future
            self._write_future = None
            future_set_result_unless_cancelled(future, None)

    def _can_keep_alive(self, start_line: httputil.RequestStartLine,
                        headers: httputil.HTTPHeaders) -> bool:
        if self.params.no_keep_alive:
            return False
        connection_header = headers.get("Connection")
        if connection_header is not None:
            connection_header = connection_header.lower()
        if start_line.version == "HTTP/1.1":
            return connection_header != "close"
        elif ("Content-Length" in headers
              or headers.get("Transfer-Encoding", "").lower() == "chunked"
              or getattr(start_line, "method", None) in ("HEAD", "GET")):
            # start_line may be a request or response start line; only
            # the former has a method attribute.
            return connection_header == "keep-alive"
        return False

    def _finish_request(self, future: "Optional[Future[None]]") -> None:
        self._clear_callbacks()
        if not self.is_client and self._disconnect_on_finish:
            self.close()
            return
        # Turn Nagle's algorithm back on, leaving the stream in its
        # default state for the next request.
        self.stream.set_nodelay(False)
        if not self._finish_future.done():
            future_set_result_unless_cancelled(self._finish_future, None)

    def _parse_headers(self, data: bytes) -> Tuple[str, httputil.HTTPHeaders]:
        # The lstrip removes newlines that some implementations sometimes
        # insert between messages of a reused connection.  Per RFC 7230,
        # we SHOULD ignore at least one empty line before the request.
        # http://tools.ietf.org/html/rfc7230#section-3.5
        data_str = native_str(data.decode("latin1")).lstrip("\r\n")
        # RFC 7230 section 3.5 allows for both CRLF and bare LF.
        eol = data_str.find("\n")
        start_line = data_str[:eol].rstrip("\r")
        headers = httputil.HTTPHeaders.parse(data_str[eol:])
        return start_line, headers

    def _read_body(
        self,
        code: int,
        headers: httputil.HTTPHeaders,
        delegate: httputil.HTTPMessageDelegate,
    ) -> Optional[Awaitable[None]]:
        if "Content-Length" in headers:
            if "Transfer-Encoding" in headers:
                # Response cannot contain both Content-Length and
                # Transfer-Encoding headers.
                # http://tools.ietf.org/html/rfc7230#section-3.3.3
                raise httputil.HTTPInputError(
                    "Response with both Transfer-Encoding and Content-Length")
            if "," in headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r",\s*", headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise httputil.HTTPInputError(
                        "Multiple unequal Content-Lengths: %r" %
                        headers["Content-Length"])
                headers["Content-Length"] = pieces[0]

            try:
                content_length = int(
                    headers["Content-Length"])  # type: Optional[int]
            except ValueError:
                # Handles non-integer Content-Length value.
                raise httputil.HTTPInputError(
                    "Only integer Content-Length is allowed: %s" %
                    headers["Content-Length"])

            if cast(int, content_length) > self._max_body_size:
                raise httputil.HTTPInputError("Content-Length too long")
        else:
            content_length = None

        if code == 204:
            # This response code is not allowed to have a non-empty body,
            # and has an implicit length of zero instead of read-until-close.
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if "Transfer-Encoding" in headers or content_length not in (None,
                                                                        0):
                raise httputil.HTTPInputError(
                    "Response with code %d should not have body" % code)
            content_length = 0

        if content_length is not None:
            return self._read_fixed_body(content_length, delegate)
        if headers.get("Transfer-Encoding", "").lower() == "chunked":
            return self._read_chunked_body(delegate)
        if self.is_client:
            return self._read_body_until_close(delegate)
        return None

    async def _read_fixed_body(self, content_length: int,
                               delegate: httputil.HTTPMessageDelegate) -> None:
        while content_length > 0:
            body = await self.stream.read_bytes(min(self.params.chunk_size,
                                                    content_length),
                                                partial=True)
            content_length -= len(body)
            if not self._write_finished or self.is_client:
                with _ExceptionLoggingContext(app_log):
                    ret = delegate.data_received(body)
                    if ret is not None:
                        await ret

    async def _read_chunked_body(
            self, delegate: httputil.HTTPMessageDelegate) -> None:
        # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
        total_size = 0
        while True:
            chunk_len_str = await self.stream.read_until(b"\r\n", max_bytes=64)
            chunk_len = int(chunk_len_str.strip(), 16)
            if chunk_len == 0:
                crlf = await self.stream.read_bytes(2)
                if crlf != b"\r\n":
                    raise httputil.HTTPInputError(
                        "improperly terminated chunked request")
                return
            total_size += chunk_len
            if total_size > self._max_body_size:
                raise httputil.HTTPInputError("chunked body too large")
            bytes_to_read = chunk_len
            while bytes_to_read:
                chunk = await self.stream.read_bytes(min(
                    bytes_to_read, self.params.chunk_size),
                                                     partial=True)
                bytes_to_read -= len(chunk)
                if not self._write_finished or self.is_client:
                    with _ExceptionLoggingContext(app_log):
                        ret = delegate.data_received(chunk)
                        if ret is not None:
                            await ret
            # chunk ends with \r\n
            crlf = await self.stream.read_bytes(2)
            assert crlf == b"\r\n"

    async def _read_body_until_close(
            self, delegate: httputil.HTTPMessageDelegate) -> None:
        body = await self.stream.read_until_close()
        if not self._write_finished or self.is_client:
            with _ExceptionLoggingContext(app_log):
                ret = delegate.data_received(body)
                if ret is not None:
                    await ret
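
The `read_response` docstring describes the client-mode sequence: write the request with `write_headers`/`finish`, then read the reply. A minimal sketch following those rules against the class above; the delegate and connection setup here are illustrative, not part of the class:

import socket
from tornado import httputil, iostream

class CollectingDelegate(httputil.HTTPMessageDelegate):
    def __init__(self):
        self.chunks = []

    def headers_received(self, start_line, headers):
        self.start_line = start_line

    def data_received(self, chunk):
        self.chunks.append(chunk)

async def fetch_root(host, port=80):
    stream = iostream.IOStream(socket.socket())
    await stream.connect((host, port))
    conn = HTTP1Connection(stream, is_client=True)
    start_line = httputil.RequestStartLine('GET', '/', 'HTTP/1.1')
    await conn.write_headers(start_line, httputil.HTTPHeaders({'Host': host}))
    conn.finish()  # no body; just flush the request
    delegate = CollectingDelegate()
    await conn.read_response(delegate)
    stream.close()
    return b''.join(delegate.chunks)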
Example #59
    def get_app(self):
        self.close_future = Future()
        return Application(
            [('/', EchoHandler, dict(close_future=self.close_future))],
            websocket_max_message_size=1024)
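
`EchoHandler` itself is not shown here; a plausible minimal version consistent with the `close_future` wiring above, modeled as an assumption on Tornado's own websocket tests:

from tornado.websocket import WebSocketHandler

class EchoHandler(WebSocketHandler):
    def initialize(self, close_future):
        self.close_future = close_future

    def on_message(self, message):
        # Echo back, preserving binary vs. text frames.
        self.write_message(message, binary=isinstance(message, bytes))

    def on_close(self):
        self.close_future.set_result((self.close_code, self.close_reason))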
Example #60
def get_zipped_content(camera_config: dict, media_type: str, group: str) -> typing.Awaitable:
    fut = Future()
    target_dir = camera_config.get('target_dir')

    if media_type == 'picture':
        exts = _PICTURE_EXTS

    elif media_type == 'movie':
        exts = _MOVIE_EXTS

    working = multiprocessing.Value('b')
    working.value = True

    # create a subprocess to add files to zip
    def do_zip(pipe):
        parent_pipe.close()

        mf = _list_media_files(target_dir, exts=exts, prefix=group)
        paths = []
        for (p, st) in mf:  # @UnusedVariable
            path = p[len(target_dir):]
            if path.startswith('/'):
                path = path[1:]

            paths.append(path)

        zip_filename = os.path.join(settings.MEDIA_PATH, '.zip-%s' % int(time.time()))
        logging.debug('adding %d files to zip file "%s"' % (len(paths), zip_filename))

        try:
            with zipfile.ZipFile(zip_filename, mode='w') as f:
                for path in paths:
                    full_path = os.path.join(target_dir, path)
                    f.write(full_path, path)

        except Exception as e:
            logging.error('failed to create zip file "%s": %s' % (zip_filename, e))

            working.value = False
            pipe.close()
            return

        logging.debug('reading zip file "%s" into memory' % zip_filename)

        try:
            with open(zip_filename, mode='rb') as f:
                data = f.read()

            working.value = False
            pipe.send(data)
            logging.debug('zip data ready')

        except Exception as e:
            logging.error('failed to read zip file "%s": %s' % (zip_filename, e))
            working.value = False

        finally:
            os.remove(zip_filename)
            pipe.close()

    logging.debug('starting zip process...')

    (parent_pipe, child_pipe) = multiprocessing.Pipe(duplex=False)
    process = multiprocessing.Process(target=do_zip, args=(child_pipe,))
    process.start()
    child_pipe.close()

    # poll the subprocess to see when it has finished
    started = datetime.datetime.now()

    def poll_process():
        io_loop = IOLoop.instance()
        if working.value:
            now = datetime.datetime.now()
            delta = now - started
            if delta.seconds < settings.ZIP_TIMEOUT:
                io_loop.add_timeout(datetime.timedelta(seconds=0.5), poll_process)

            else:  # process did not finish in time
                logging.error('timeout waiting for the zip process to finish')
                try:
                    os.kill(process.pid, signal.SIGTERM)

                except:
                    pass  # nevermind

                fut.set_result(None)

        else:  # finished
            try:
                data = parent_pipe.recv()
                logging.debug('zip process has returned %d bytes' % len(data))

            except:
                data = None

            fut.set_result(data)

    poll_process()
    return fut
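
As with `list_media`, the returned Future resolves to the payload (the zip bytes) or `None` on timeout or failure, so callers can yield it. A short hedged sketch:

from tornado import gen

@gen.coroutine
def download_group_zip(camera_config, group):
    data = yield get_zipped_content(camera_config, media_type='picture',
                                    group=group)
    if data is None:
        raise RuntimeError('zip process timed out or failed')
    raise gen.Return(data)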