def _extend(self, res):
    Cursor._extend(self, res)
    self.new_response.set_result(True)
    self.new_response = Future()
def get_app(self):
    self.close_future = Future()
    return Application([
        ('/native', NativeCoroutineOnMessageHandler,
         dict(close_future=self.close_future)),
    ])
def list_media(camera_config: dict, media_type: str, prefix=None) -> typing.Awaitable:
    fut = Future()
    target_dir = camera_config.get('target_dir')

    if media_type == 'picture':
        exts = _PICTURE_EXTS
    elif media_type == 'movie':
        exts = _MOVIE_EXTS

    # create a subprocess to retrieve media files
    def do_list_media(pipe):
        import mimetypes
        parent_pipe.close()

        mf = _list_media_files(target_dir, exts=exts, prefix=prefix)
        for (p, st) in mf:
            path = p[len(target_dir):]
            if not path.startswith('/'):
                path = '/' + path

            timestamp = st.st_mtime
            size = st.st_size

            pipe.send({
                'path': path,
                'mimeType': mimetypes.guess_type(path)[0]
                            if mimetypes.guess_type(path)[0] is not None else 'video/mpeg',
                'momentStr': pretty_date_time(datetime.datetime.fromtimestamp(timestamp)),
                'momentStrShort': pretty_date_time(
                    datetime.datetime.fromtimestamp(timestamp), short=True),
                'sizeStr': utils.pretty_size(size),
                'timestamp': timestamp
            })

        pipe.close()

    logging.debug('starting media listing process...')

    (parent_pipe, child_pipe) = multiprocessing.Pipe(duplex=False)
    process = multiprocessing.Process(target=do_list_media, args=(child_pipe,))
    process.start()
    child_pipe.close()

    # poll the subprocess to see when it has finished
    started = datetime.datetime.now()
    media_list = []

    def read_media_list():
        while parent_pipe.poll():
            try:
                media_list.append(parent_pipe.recv())
            except EOFError:
                break

    def poll_process():
        io_loop = IOLoop.instance()
        if process.is_alive():  # not finished yet
            now = datetime.datetime.now()
            delta = now - started
            if delta.seconds < settings.LIST_MEDIA_TIMEOUT:
                io_loop.add_timeout(datetime.timedelta(seconds=0.5), poll_process)
                read_media_list()
            else:  # process did not finish in time
                logging.error('timeout waiting for the media listing process to finish')
                try:
                    os.kill(process.pid, signal.SIGTERM)
                except:
                    pass  # nevermind

                fut.set_result(None)
        else:  # finished
            read_media_list()
            logging.debug('media listing process has returned %(count)s files'
                          % {'count': len(media_list)})
            fut.set_result(media_list)

    poll_process()

    return fut
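# A minimal usage sketch (not from the original source) showing how the
# Future returned by list_media is consumed from a Tornado coroutine; the
# `camera_config` dict and its '/data/output' path are hypothetical.
from tornado import gen

@gen.coroutine
def print_movie_listing():
    camera_config = {'target_dir': '/data/output'}  # hypothetical config
    media_list = yield list_media(camera_config, media_type='movie')
    if media_list is None:  # the listing subprocess timed out
        print('media listing timed out')
    else:
        for entry in media_list:
            print(entry['path'], entry['sizeStr'])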
def setUp(self, cfg, mock_storage):
    super(AucoteTest, self).setUp()
    self.cfg = cfg
    self.storage = mock_storage
    self.cfg._cfg = {
        'portdetection': {
            'security_scans': ['tools']
        },
        'storage': {
            'db': 'test_storage',
            'fresh_start': True,
            'max_nodes_query': 243
        },
        'service': {
            'scans': {
                'threads': 30,
                'parallel_tasks': 30
            },
            'api': {
                'v1': {
                    'host': None,
                    'port': None
                },
                'path': ''
            }
        },
        'kuduworker': {
            'enable': True,
            'queue': {
                'address': None
            }
        },
        'pid_file': None,
        'fixtures': {
            'exploits': {
                'filename': None
            }
        },
        'config_filename': 'test',
        'topdis': {
            'api': {
                'host': 'localhost',
                'port': '1234',
                'base': '/api/v1'
            }
        },
        'tcpportscan': {
            'host': 'localhost',
            'port': '1239'
        },
        'tftp': {
            'port': 6969,
            'timeout': 120,
            'host': '127.0.0.1',
            'min_port': 60000,
            'max_port': 61000
        }
    }
    self.aucote = Aucote(exploits=MagicMock(), kudu_queue=MagicMock(),
                         tools_config=MagicMock())
    self.aucote.ioloop = MagicMock()
    self.aucote._storage_thread = MagicMock()
    self.aucote._tftp_thread = MagicMock()

    for task_manager in self.aucote.async_task_managers.values():
        future = Future()
        future.set_result(MagicMock())
        task_manager.shutdown_condition.wait.return_value = future

    self.aucote._scan_task = MagicMock()
    self.scan_task_run = MagicMock()
    scan_task_future = Future()
    scan_task_future.set_result(self.scan_task_run)
    self.aucote._scan_task.run.return_value = scan_task_future
def test_coroutine():
    ws_client = WebsocketClient()

    prop_names = list(td.properties.keys())
    prop_name_01 = prop_names[0]
    prop_name_02 = prop_names[1]

    obsv_01 = ws_client.on_property_change(td, prop_name_01)
    obsv_02 = ws_client.on_property_change(td, prop_name_02)

    prop_values_01 = [uuid.uuid4().hex for _ in range(10)]
    prop_values_02 = [uuid.uuid4().hex for _ in range(90)]

    future_values_01 = {key: Future() for key in prop_values_01}
    future_values_02 = {key: Future() for key in prop_values_02}

    future_conn_01 = Future()
    future_conn_02 = Future()

    def build_on_next(fut_conn, fut_vals):
        def on_next(ev):
            if not fut_conn.done():
                fut_conn.set_result(True)
            if ev.data.value in fut_vals:
                fut_vals[ev.data.value].set_result(True)
        return on_next

    on_next_01 = build_on_next(future_conn_01, future_values_01)
    on_next_02 = build_on_next(future_conn_02, future_values_02)

    subscription_01 = obsv_01.subscribe_on(IOLoopScheduler()).subscribe(on_next_01)
    subscription_02 = obsv_02.subscribe_on(IOLoopScheduler()).subscribe(on_next_02)

    while not future_conn_01.done() or not future_conn_02.done():
        yield exposed_thing.write_property(prop_name_01, uuid.uuid4().hex)
        yield exposed_thing.write_property(prop_name_02, uuid.uuid4().hex)
        yield tornado.gen.sleep(0)

    assert len(prop_values_01) < len(prop_values_02)

    for idx in range(len(prop_values_01)):
        yield exposed_thing.write_property(prop_name_01, prop_values_01[idx])
        yield exposed_thing.write_property(prop_name_02, prop_values_02[idx])

    yield list(future_values_01.values())

    assert next(fut for fut in six.itervalues(future_values_02) if not fut.done())

    subscription_01.dispose()

    for val in prop_values_02[len(prop_values_01):]:
        yield exposed_thing.write_property(prop_name_02, val)

    yield list(future_values_02.values())

    subscription_02.dispose()
def write_headers(self, start_line, headers, chunk=None, callback=None):
    """Implements `.HTTPConnection.write_headers`."""
    lines = []
    if self.is_client:
        self._request_start_line = start_line
        lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
        # Client requests with a non-empty body must have either a
        # Content-Length or a Transfer-Encoding.
        self._chunking_output = (
            start_line.method in ('POST', 'PUT', 'PATCH') and
            'Content-Length' not in headers and
            'Transfer-Encoding' not in headers)
    else:
        self._response_start_line = start_line
        lines.append(utf8('HTTP/1.1 %d %s' % (start_line[1], start_line[2])))
        self._chunking_output = (
            # TODO: should this use
            # self._request_start_line.version or
            # start_line.version?
            self._request_start_line.version == 'HTTP/1.1' and
            # 1xx, 204 and 304 responses have no body (not even a zero-length
            # body), and so should not have either Content-Length or
            # Transfer-Encoding headers.
            start_line.code not in (204, 304) and
            (start_line.code < 100 or start_line.code >= 200) and
            # No need to chunk the output if a Content-Length is specified.
            'Content-Length' not in headers and
            # Applications are discouraged from touching Transfer-Encoding,
            # but if they do, leave it alone.
            'Transfer-Encoding' not in headers)
        # If connection to a 1.1 client will be closed, inform client
        if (self._request_start_line.version == 'HTTP/1.1' and
                self._disconnect_on_finish):
            headers['Connection'] = 'close'
        # If a 1.0 client asked for keep-alive, add the header.
        if (self._request_start_line.version == 'HTTP/1.0' and
                self._request_headers.get('Connection', '').lower() == 'keep-alive'):
            headers['Connection'] = 'Keep-Alive'
    if self._chunking_output:
        headers['Transfer-Encoding'] = 'chunked'
    if (not self.is_client and
            (self._request_start_line.method == 'HEAD' or
             start_line.code == 304)):
        self._expected_content_remaining = 0
    elif 'Content-Length' in headers:
        self._expected_content_remaining = int(headers['Content-Length'])
    else:
        self._expected_content_remaining = None
    # TODO: headers are supposed to be of type str, but we still have some
    # cases that let bytes slip through. Remove these native_str calls when
    # those are fixed.
    header_lines = (native_str(n) + ": " + native_str(v)
                    for n, v in headers.get_all())
    if PY3:
        lines.extend(l.encode('latin1') for l in header_lines)
    else:
        lines.extend(header_lines)
    for line in lines:
        if b'\n' in line:
            raise ValueError('Newline in header: ' + repr(line))
    future = None
    if self.stream.closed():
        future = self._write_future = Future()
        future.set_exception(iostream.StreamClosedError())
        future.exception()
    else:
        if callback is not None:
            self._write_callback = stack_context.wrap(callback)
        else:
            future = self._write_future = Future()
        data = b"\r\n".join(lines) + b"\r\n\r\n"
        if chunk:
            data += self._format_chunk(chunk)
        self._pending_write = self.stream.write(data)
        self._pending_write.add_done_callback(self._on_write_complete)
    return future
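# A standalone hedged sketch (assumed, not part of the original module) of
# the response-side chunking decision encoded in write_headers above:
# chunked transfer-encoding applies only to HTTP/1.1 responses that may
# carry a body and declare neither Content-Length nor Transfer-Encoding.
def needs_chunked_output(request_version, code, headers):
    return (request_version == 'HTTP/1.1' and
            code not in (204, 304) and
            (code < 100 or code >= 200) and
            'Content-Length' not in headers and
            'Transfer-Encoding' not in headers)

assert needs_chunked_output('HTTP/1.1', 200, {})
assert not needs_chunked_output('HTTP/1.1', 304, {})                      # no body allowed
assert not needs_chunked_output('HTTP/1.1', 200, {'Content-Length': '5'})  # explicit length
assert not needs_chunked_output('HTTP/1.0', 200, {})                      # 1.0 cannot chunk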
def create_stream(self, af, addr):
    # record a Future keyed by (address family, address) for the pending connection
    future = Future()
    self.connect_futures[(af, addr)] = future
    return future
def __init__(self, **kwargs):
    super(GatewayWebSocketClient, self).__init__(**kwargs)
    self.kernel_id = None
    self.ws = None
    self.ws_future = Future()
    self.disconnected = False
def close(self):
    # resolve immediately: there is nothing asynchronous to wait for
    fut = Future()
    fut.set_result(True)
    return fut
def list(self):
    # test double: the returned Future raises InvalidProjectEgg when awaited
    future = Future()
    future.set_exception(InvalidProjectEgg())
    return future
def get(self, key):
    # resolve with the cached value (or None) without any real I/O
    f = Future()
    f.set_result(self._cache.get(key))
    return f
def list(self):
    # test double: the returned Future raises ProcessFailed when awaited
    future = Future()
    future.set_exception(ProcessFailed())
    return future
def connect_with_future(self):
    self.connect_future = Future()
    self.ioloop.add_future(TCPClient().connect(self.ip, self.port),
                           self.connect_callback)
    self.set_timout(timeout=3)
    return self.connect_future
def test_already_resolved(self):
    future = Future()
    future.set_result('asdf')
    result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                    future)
    self.assertEqual(result, 'asdf')
def wait_for_notifications(self, key=None):
    self.connected[key] = Future()
    return self.connected[key]
def start_tls(self, server_side, ssl_options=None, server_hostname=None,
              connect_timeout=None):
    if (self._read_callback or self._read_future or
            self._write_callback or self._write_futures or
            self._connect_callback or self._connect_future or
            self._pending_callbacks or self._closed or
            self._read_buffer or self._write_buffer):
        raise ValueError("IOStream is not idle; cannot convert to SSL")
    if ssl_options is None:
        ssl_options = _client_ssl_defaults

    socket = self.socket
    self.io_loop.remove_handler(socket)
    self.socket = None
    socket = ssl_wrap_socket(socket, ssl_options,
                             server_hostname=server_hostname,
                             server_side=server_side,
                             do_handshake_on_connect=False)
    orig_close_callback = self._close_callback
    self._close_callback = None

    future = Future()
    ssl_stream = SSLIOStream(socket, ssl_options=ssl_options)
    # Wrap the original close callback so we can fail our Future as well.
    # If we had an "unwrap" counterpart to this method we would need
    # to restore the original callback after our Future resolves
    # so that repeated wrap/unwrap calls don't build up layers.

    def close_callback():
        if not future.done():
            # Note that unlike most Futures returned by IOStream,
            # this one passes the underlying error through directly
            # instead of wrapping everything in a StreamClosedError
            # with a real_error attribute. This is because once the
            # connection is established it's more helpful to raise
            # the SSLError directly than to hide it behind a
            # StreamClosedError (and the client is expecting SSL
            # issues rather than network issues since this method is
            # named start_tls).
            future.set_exception(ssl_stream.error or StreamClosedError())
        if orig_close_callback is not None:
            orig_close_callback()

    if connect_timeout:
        def timeout():
            ssl_stream._loop_connect_timeout = None
            if not future.done():
                ssl_stream.close((None, IOError("Connect timeout"), None))

        ssl_stream._loop_connect_timeout = self.io_loop.call_later(
            connect_timeout, timeout)

    ssl_stream.set_close_callback(close_callback)
    ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
    ssl_stream.max_buffer_size = self.max_buffer_size
    ssl_stream.read_chunk_size = self.read_chunk_size
    return future
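# Hedged usage sketch: upgrading an established client connection to TLS
# after an application-level negotiation (in the style of SMTP STARTTLS).
# The negotiate_starttls helper and 'mail.example.com' host name are
# hypothetical, not part of the original code.
from tornado import gen

@gen.coroutine
def upgrade_to_tls(stream):
    yield negotiate_starttls(stream)  # hypothetical cleartext negotiation
    # start_tls detaches the plain IOStream and resolves with an SSLIOStream
    ssl_stream = yield stream.start_tls(server_side=False,
                                        server_hostname='mail.example.com')
    raise gen.Return(ssl_stream)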
def _handle_request(self):
    """Turns an HTTP request into annotated notebook code to execute on a
    kernel.

    Sets the HTTP response code, headers, and response body based on the
    result of the kernel execution. Then finishes the Tornado response.
    """
    self.response_future = Future()
    kernel_client, kernel_id = yield self.kernel_pool.acquire()
    try:
        # Method not supported
        if self.request.method not in self.sources:
            raise UnsupportedMethodError(self.request.method)

        # Set the Content-Type and status to default values
        self.set_header('Content-Type', 'text/plain')
        self.set_status(200)

        # Get the source to execute in response to this request
        source_code = self.sources[self.request.method]
        # Build the request dictionary
        request = json.dumps({
            'body': parse_body(self.request),
            'args': parse_args(self.request.query_arguments),
            'path': self.path_kwargs,
            'headers': headers_to_dict(self.request.headers)
        })
        # Turn the request string into a valid code string
        request_code = format_request(request)

        # Run the request and source code and yield until there's a result
        access_log.debug('Request code for notebook cell is: {}'.format(request_code))
        request_future = self.execute_code(kernel_client, kernel_id, request_code)
        yield request_future
        source_future = self.execute_code(kernel_client, kernel_id, source_code)
        source_result = yield source_future

        # If a response code cell exists, execute it
        if self.request.method in self.response_sources:
            response_code = self.response_sources[self.request.method]
            response_future = self.execute_code(kernel_client, kernel_id, response_code)

            # Wait for the response and parse the json value
            response_result = yield response_future
            response = json.loads(response_result)

            # Copy all the header values into the tornado response
            if 'headers' in response:
                for header in response['headers']:
                    self.set_header(header, response['headers'][header])

            # Set the status code if it exists
            if 'status' in response:
                self.set_status(response['status'])

        # Write the result of the source code execution
        self.write(source_result)
    # If there was a problem executing the code, return a 500
    except CodeExecutionError as err:
        self.write(str(err))
        self.set_status(500)
    # An unsupported method was called on this handler
    except UnsupportedMethodError:
        self.set_status(405)
    finally:
        # Always make sure we release the kernel and finish the request
        self.response_future.set_result(None)
        self.kernel_pool.release(kernel_id)
        self.finish()
def write_headers(self, start_line, headers, chunk=None, callback=None):
    """Implements `.HTTPConnection.write_headers`."""
    lines = []
    if self.is_client:
        self._request_start_line = start_line
        lines.append(utf8('%s %s HTTP/1.1' % (start_line[0], start_line[1])))
        # Client requests with a non-empty body must have either a
        # Content-Length or a Transfer-Encoding.
        self._chunking_output = (
            start_line.method in ('POST', 'PUT', 'PATCH') and
            'Content-Length' not in headers and
            'Transfer-Encoding' not in headers)
    else:
        self._response_start_line = start_line
        lines.append(utf8('HTTP/1.1 %s %s' % (start_line[1], start_line[2])))
        self._chunking_output = (
            # TODO: should this use
            # self._request_start_line.version or
            # start_line.version?
            self._request_start_line.version == 'HTTP/1.1' and
            # 304 responses have no body (not even a zero-length body), and
            # so should not have either Content-Length or Transfer-Encoding
            # headers.
            start_line.code != 304 and
            # No need to chunk the output if a Content-Length is specified.
            'Content-Length' not in headers and
            # Applications are discouraged from touching Transfer-Encoding,
            # but if they do, leave it alone.
            'Transfer-Encoding' not in headers)
        # If a 1.0 client asked for keep-alive, add the header.
        if (self._request_start_line.version == 'HTTP/1.0' and
                self._request_headers.get('Connection', '').lower() == 'keep-alive'):
            headers['Connection'] = 'Keep-Alive'
    if self._chunking_output:
        headers['Transfer-Encoding'] = 'chunked'
    if (not self.is_client and
            (self._request_start_line.method == 'HEAD' or
             start_line.code == 304)):
        self._expected_content_remaining = 0
    elif 'Content-Length' in headers:
        self._expected_content_remaining = int(headers['Content-Length'])
    else:
        self._expected_content_remaining = None
    lines.extend([utf8(n) + b": " + utf8(v) for n, v in headers.get_all()])
    for line in lines:
        if b'\n' in line:
            raise ValueError('Newline in header: ' + repr(line))
    future = None
    if self.stream.closed():
        future = self._write_future = Future()
        future.set_exception(iostream.StreamClosedError())
    else:
        if callback is not None:
            self._write_callback = stack_context.wrap(callback)
        else:
            future = self._write_future = Future()
        data = b"\r\n".join(lines) + b"\r\n\r\n"
        if chunk:
            data += self._format_chunk(chunk)
        self._pending_write = self.stream.write(data)
        self._pending_write.add_done_callback(self._on_write_complete)
    return future
def fetch(self, request, callback=None, raise_error=True, **kwargs):
    """Executes a request, asynchronously returning an `HTTPResponse`.

    The request may be either a string URL or an `HTTPRequest` object.
    If it is a string, we construct an `HTTPRequest` using any additional
    kwargs: ``HTTPRequest(request, **kwargs)``

    This method returns a `.Future` whose result is an `HTTPResponse`.
    By default, the ``Future`` will raise an `HTTPError` if the request
    returned a non-200 response code (other errors may also be raised if
    the server could not be contacted). Instead, if ``raise_error`` is set
    to False, the response will always be returned regardless of the
    response code.

    If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
    In the callback interface, `HTTPError` is not automatically raised.
    Instead, you must check the response's ``error`` attribute or call its
    `~HTTPResponse.rethrow` method.
    """
    if self._closed:
        raise RuntimeError("fetch() called on closed AsyncHTTPClient")
    if not isinstance(request, HTTPRequest):
        request = HTTPRequest(url=request, **kwargs)
    else:
        if kwargs:
            raise ValueError(
                "kwargs can't be used if request is an HTTPRequest object")
    # We may modify this (to add Host, Accept-Encoding, etc),
    # so make sure we don't modify the caller's object. This is also
    # where normal dicts get converted to HTTPHeaders objects.
    request.headers = httputil.HTTPHeaders(request.headers)
    request = _RequestProxy(request, self.defaults)
    future = Future()
    if callback is not None:
        callback = stack_context.wrap(callback)

        def handle_future(future):
            exc = future.exception()
            if isinstance(exc, HTTPError) and exc.response is not None:
                response = exc.response
            elif exc is not None:
                response = HTTPResponse(
                    request, 599, error=exc,
                    request_time=time.time() - request.start_time)
            else:
                response = future.result()
            self.io_loop.add_callback(callback, response)

        future.add_done_callback(handle_future)

    def handle_response(response):
        if raise_error and response.error:
            future.set_exception(response.error)
        else:
            future_set_result_unless_cancelled(future, response)

    self.fetch_impl(request, handle_response)
    return future
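# Hedged usage sketch for the two calling conventions described in the
# docstring above; the URL is illustrative only.
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError

@gen.coroutine
def fetch_example():
    client = AsyncHTTPClient()
    try:
        # Future interface: non-2xx raises HTTPError unless raise_error=False
        response = yield client.fetch('http://example.com/')
        print(response.code)
    except HTTPError as err:
        print('request failed:', err)

    # Callback interface: errors are reported on the response object instead
    def on_response(response):
        print(response.error or response.code)

    client.fetch('http://example.com/', callback=on_response)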
def test_easy_handshake_ssl_errors(ioloop_mock, mocker):
    mocker.patch('temboardui.autossl.EasySSLIOStream._add_io_state')
    from tornado.concurrent import Future
    from temboardui.autossl import (
        EasySSLIOStream,
        ssl,
        SSLErrorHTTPRequest,
    )

    socket = Mock(name='socket', spec=ssl.SSLSocket)
    stream = EasySSLIOStream(socket)
    stream.wait_for_handshake()

    socket.do_handshake.side_effect = ssl.SSLError(123)
    stream.socket = socket
    with pytest.raises(ssl.SSLError):
        stream._do_ssl_handshake()

    socket.do_handshake.side_effect = ssl.SSLError(ssl.SSL_ERROR_WANT_READ)
    stream.socket = socket
    stream._do_ssl_handshake()
    assert True is stream._handshake_reading

    socket.do_handshake.side_effect = ssl.SSLError(ssl.SSL_ERROR_WANT_WRITE)
    stream.socket = socket
    stream._do_ssl_handshake()
    assert True is stream._handshake_writing

    socket.do_handshake.side_effect = ssl.SSLError(ssl.SSL_ERROR_EOF)
    stream.socket = socket
    stream._do_ssl_handshake()

    stream.wait_for_handshake()
    socket.do_handshake.side_effect = ssl.SSLError(ssl.SSL_ERROR_SSL)
    socket.getpeername.side_effect = Exception('Unknown test error')
    stream.socket = socket
    stream._do_ssl_handshake()
    assert socket.getpeername.called is True

    socket.do_handshake.side_effect = ssl.SSLError(ssl.SSL_ERROR_SSL)
    socket.getpeername.side_effect = None
    socket.getpeername.reset_mock()
    socket.getpeername.return_value = '127.0.0.1'
    stream.socket = socket
    stream.wait_for_handshake()
    stream._do_ssl_handshake()
    assert socket.getpeername.called is True

    socket.do_handshake.side_effect = err = ssl.SSLError(
        ssl.SSL_ERROR_SSL,
        '[SSL:HTTP_REQUEST] http request',
    )
    # See https://github.com/python/cpython/blob/8ae264ce/Modules/_ssl.c#L475
    setattr(err, 'reason', 'HTTP_REQUEST')
    socket.getpeername.side_effect = None
    socket.getpeername.reset_mock()
    socket.getpeername.return_value = '127.0.0.1'
    stream.socket = socket
    stream._ssl_connect_future = fut = Future()
    stream._do_ssl_handshake()
    assert socket.getpeername.called is True
    with pytest.raises(SSLErrorHTTPRequest):
        fut.result()
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.

    .. deprecated:: 4.0
       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    """
    def __call__(self, environ, start_response):
        return WSGIAdapter(self)(environ, start_response)


# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it.
_dummy_future = Future()
_dummy_future.set_result(None)


class _WSGIConnection(httputil.HTTPConnection):
    def __init__(self, method, start_response, context):
        self.method = method
        self.start_response = start_response
        self.context = context
        self._write_buffer = []
        self._finished = False
        self._expected_content_remaining = None
        self._error = None

    def set_close_callback(self, callback):
        # WSGI has no facility for detecting a closed connection mid-request,
        pass
def test_timeout(self):
    with self.assertRaises(gen.TimeoutError):
        yield gen.with_timeout(datetime.timedelta(seconds=0.1), Future())
def get_future(self, result=None):
    future = Future()
    future.set_result(result)
    return future
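# Hedged sketch: pre-resolved Futures like the one returned by get_future
# let a MagicMock stand in for an asynchronous API in tests (compare the
# Future wiring in the setUp earlier in this section). The `api.load`
# attribute is hypothetical.
from unittest import mock
from tornado.testing import gen_test

@gen_test
def test_with_mocked_async_api(self):
    api = mock.MagicMock()
    api.load.return_value = self.get_future({'status': 'ok'})
    result = yield api.load()
    self.assertEqual(result['status'], 'ok')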
def tester():
    fut = Future()
    weakref_scope[0] = weakref.ref(fut)
    self.io_loop.add_callback(callback)
    yield fut
def start(self):
    """Return a Future that will never finish"""
    return Future()
def write_headers(
    self,
    start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
    headers: httputil.HTTPHeaders,
    chunk: Optional[bytes] = None,
) -> "Future[None]":
    """Implements `.HTTPConnection.write_headers`."""
    lines = []
    if self.is_client:
        assert isinstance(start_line, httputil.RequestStartLine)
        self._request_start_line = start_line
        lines.append(utf8("%s %s HTTP/1.1" % (start_line[0], start_line[1])))
        # Client requests with a non-empty body must have either a
        # Content-Length or a Transfer-Encoding.
        self._chunking_output = (
            start_line.method in ("POST", "PUT", "PATCH")
            and "Content-Length" not in headers
            and (
                "Transfer-Encoding" not in headers
                or headers["Transfer-Encoding"] == "chunked"
            )
        )
    else:
        assert isinstance(start_line, httputil.ResponseStartLine)
        assert self._request_start_line is not None
        assert self._request_headers is not None
        self._response_start_line = start_line
        lines.append(utf8("HTTP/1.1 %d %s" % (start_line[1], start_line[2])))
        self._chunking_output = (
            # TODO: should this use
            # self._request_start_line.version or
            # start_line.version?
            self._request_start_line.version == "HTTP/1.1"
            # Omit payload header field for HEAD request.
            and self._request_start_line.method != "HEAD"
            # 1xx, 204 and 304 responses have no body (not even a zero-length
            # body), and so should not have either Content-Length or
            # Transfer-Encoding headers.
            and start_line.code not in (204, 304)
            and (start_line.code < 100 or start_line.code >= 200)
            # No need to chunk the output if a Content-Length is specified.
            and "Content-Length" not in headers
            # Applications are discouraged from touching Transfer-Encoding,
            # but if they do, leave it alone.
            and "Transfer-Encoding" not in headers
        )
        # If connection to a 1.1 client will be closed, inform client
        if (self._request_start_line.version == "HTTP/1.1"
                and self._disconnect_on_finish):
            headers["Connection"] = "close"
        # If a 1.0 client asked for keep-alive, add the header.
        if (self._request_start_line.version == "HTTP/1.0"
                and self._request_headers.get("Connection", "").lower() == "keep-alive"):
            headers["Connection"] = "Keep-Alive"
    if self._chunking_output:
        headers["Transfer-Encoding"] = "chunked"
    if not self.is_client and (
        self._request_start_line.method == "HEAD"
        or cast(httputil.ResponseStartLine, start_line).code == 304
    ):
        self._expected_content_remaining = 0
    elif "Content-Length" in headers:
        self._expected_content_remaining = int(headers["Content-Length"])
    else:
        self._expected_content_remaining = None
    # TODO: headers are supposed to be of type str, but we still have some
    # cases that let bytes slip through. Remove these native_str calls when
    # those are fixed.
    header_lines = (
        native_str(n) + ": " + native_str(v) for n, v in headers.get_all()
    )
    lines.extend(line.encode("latin1") for line in header_lines)
    for line in lines:
        if b"\n" in line:
            raise ValueError("Newline in header: " + repr(line))
    future = None
    if self.stream.closed():
        future = self._write_future = Future()
        future.set_exception(iostream.StreamClosedError())
        future.exception()
    else:
        future = self._write_future = Future()
        data = b"\r\n".join(lines) + b"\r\n\r\n"
        if chunk:
            data += self._format_chunk(chunk)
        self._pending_write = self.stream.write(data)
        future_add_done_callback(self._pending_write, self._on_write_complete)
    return future
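# Hedged sketch of the main behavioral difference from the older variants
# above: this version also skips chunked framing when the *request* method
# was HEAD, since a HEAD response must not carry a payload body. The helper
# name is assumed for illustration.
def needs_chunked_output_v2(request_version, request_method, code, headers):
    return (request_version == 'HTTP/1.1' and
            request_method != 'HEAD' and
            code not in (204, 304) and
            (code < 100 or code >= 200) and
            'Content-Length' not in headers and
            'Transfer-Encoding' not in headers)

assert needs_chunked_output_v2('HTTP/1.1', 'GET', 200, {})
assert not needs_chunked_output_v2('HTTP/1.1', 'HEAD', 200, {})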
def get_app(self):
    self.close_future = Future()
    return Application([
        ('/', EchoHandler, dict(close_future=self.close_future)),
    ], websocket_max_message_size=1024)
class ZMQChannels:

    zmq_stream: str = None
    channels: dict = {}
    kernel_id: str = None
    kernel_info_channel: str = None
    _kernel_info_future = Future()
    _close_future = Future()
    session_key: str = ''
    _iopub_window_msg_count: int = 0
    _iopub_window_byte_count: int = 0
    _iopub_msgs_exceeded: bool = False
    _iopub_data_exceeded: bool = False
    _iopub_window_byte_queue: list = []
    _open_sessions = {}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.session = Session(config=self.config)

    @property
    def config(self):
        return router.app.config

    @property
    def app(self):
        return router.app

    @property
    def log(self):
        return router.app.log

    @property
    def kernel_manager(self):
        return router.app.kernel_manager

    @property
    def kernel_info_timeout(self):
        km_default = self.kernel_manager.kernel_info_timeout
        return self.app.tornado_settings.get('kernel_info_timeout', km_default)

    @property
    def iopub_msg_rate_limit(self):
        return self.app.tornado_settings.get('iopub_msg_rate_limit', 0)

    @property
    def iopub_data_rate_limit(self):
        return self.app.tornado_settings.get('iopub_data_rate_limit', 0)

    @property
    def rate_limit_window(self):
        return self.app.tornado_settings.get('rate_limit_window', 1.0)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__,
                           getattr(self, 'kernel_id', 'uninitialized'))

    def create_stream(self):
        km = self.kernel_manager
        identity = self.session.bsession
        for channel in ('shell', 'control', 'iopub', 'stdin'):
            meth = getattr(km, 'connect_' + channel)
            self.channels[channel] = stream = meth(self.kernel_id,
                                                   identity=identity)
            stream.channel = channel

    def request_kernel_info(self):
        """send a request for kernel_info"""
        km = self.kernel_manager
        kernel = km.get_kernel(self.kernel_id)
        try:
            # check for previous request
            future = kernel._kernel_info_future
        except AttributeError:
            self.log.debug("Requesting kernel info from %s", self.kernel_id)
            # Create a kernel_info channel to query the kernel protocol
            # version. This channel will be closed after the kernel_info
            # reply is received.
            if self.kernel_info_channel is None:
                self.kernel_info_channel = km.connect_shell(self.kernel_id)
            self.kernel_info_channel.on_recv(self._handle_kernel_info_reply)
            self.session.send(self.kernel_info_channel, "kernel_info_request")
            # store the future on the kernel, so only one request is sent
            kernel._kernel_info_future = self._kernel_info_future
        else:
            if not future.done():
                self.log.debug("Waiting for pending kernel_info request")
            future.add_done_callback(
                lambda f: self._finish_kernel_info(f.result()))
        return self._kernel_info_future

    def _handle_kernel_info_reply(self, msg):
        """process the kernel_info_reply

        enabling msg spec adaptation, if necessary
        """
        idents, msg = self.session.feed_identities(msg)
        try:
            msg = self.session.deserialize(msg)
        except:
            self.log.error("Bad kernel_info reply", exc_info=True)
            self._kernel_info_future.set_result({})
            return
        else:
            info = msg['content']
            self.log.debug("Received kernel info: %s", info)
            if msg['msg_type'] != 'kernel_info_reply' or 'protocol_version' not in info:
                self.log.error("Kernel info request failed, assuming current %s", info)
                info = {}
            self._finish_kernel_info(info)

        # close the kernel_info channel, we don't need it anymore
        if self.kernel_info_channel:
            self.kernel_info_channel.close()
        self.kernel_info_channel = None

    def _finish_kernel_info(self, info):
        """Finish handling kernel_info reply

        Set up protocol adaptation, if needed, and signal that connection
        can continue.
        """
        protocol_version = info.get('protocol_version', client_protocol_version)
        if protocol_version != client_protocol_version:
            self.session.adapt_version = int(protocol_version.split('.')[0])
            self.log.info(
                "Adapting from protocol version {protocol_version} "
                "(kernel {kernel_id}) to {client_protocol_version} (client)."
                .format(protocol_version=protocol_version,
                        kernel_id=self.kernel_id,
                        client_protocol_version=client_protocol_version))
        if not self._kernel_info_future.done():
            self._kernel_info_future.set_result(info)

    # async def pre_get(self):
    #     # authenticate first
    #     super().pre_get()
    #     # check session collision:
    #     await self._register_session()
    #     # then request kernel info, waiting up to a certain time before
    #     # giving up. We don't want to wait forever, because browsers don't
    #     # take it well when servers never respond to websocket connection
    #     # requests.
    #     kernel = self.kernel_manager.get_kernel(self.kernel_id)
    #     self.session.key = kernel.session.key
    #     future = self.request_kernel_info()
    #
    #     def give_up():
    #         """Don't wait forever for the kernel to reply"""
    #         if future.done():
    #             return
    #         self.log.warning("Timeout waiting for kernel_info reply from %s",
    #                          self.kernel_id)
    #         future.set_result({})
    #     loop = IOLoop.current()
    #     loop.add_timeout(loop.time() + self.kernel_info_timeout, give_up)
    #     # actually wait for it
    #     await future

    async def get(self, kernel_id):
        self.kernel_id = cast_unicode(kernel_id, 'ascii')
        await super().get(kernel_id=kernel_id)

    async def _register_session(self):
        """Ensure we aren't creating a duplicate session.

        If a previous identical session is still open, close it to avoid
        collisions. This is likely due to a client reconnecting from a lost
        network connection, where the socket on our side has not been
        cleaned up yet.
        """
        self.session_key = '%s:%s' % (self.kernel_id, self.session.session)
        stale_handler = self._open_sessions.get(self.session_key)
        if stale_handler:
            self.log.warning("Replacing stale connection: %s", self.session_key)
            await stale_handler.close()
        self._open_sessions[self.session_key] = self

    def open(self, kernel_id):
        super().open()
        km = self.kernel_manager
        km.notify_connect(kernel_id)

        # on new connections, flush the message buffer
        buffer_info = km.get_buffer(kernel_id, self.session_key)
        if buffer_info and buffer_info['session_key'] == self.session_key:
            self.log.info("Restoring connection for %s", self.session_key)
            self.channels = buffer_info['channels']
            replay_buffer = buffer_info['buffer']
            if replay_buffer:
                self.log.info("Replaying %s buffered messages", len(replay_buffer))
                for channel, msg_list in replay_buffer:
                    stream = self.channels[channel]
                    self._on_zmq_reply(stream, msg_list)
        else:
            try:
                self.create_stream()
            except web.HTTPError as e:
                self.log.error("Error opening stream: %s", e)
                # WebSockets don't respond to traditional error codes so we
                # close the connection.
                for channel, stream in self.channels.items():
                    if not stream.closed():
                        stream.close()
                self.close()
                return

        km.add_restart_callback(self.kernel_id, self.on_kernel_restarted)
        km.add_restart_callback(self.kernel_id, self.on_restart_failed, 'dead')

        for channel, stream in self.channels.items():
            stream.on_recv_stream(self._on_zmq_reply)

    def on_message(self, msg):
        if not self.channels:
            # already closed, ignore the message
            self.log.debug("Received message on closed websocket %r", msg)
            return
        if isinstance(msg, bytes):
            msg = deserialize_binary_message(msg)
        else:
            msg = json.loads(msg)
        channel = msg.pop('channel', None)
        if channel is None:
            self.log.warning("No channel specified, assuming shell: %s", msg)
            channel = 'shell'
        if channel not in self.channels:
            self.log.warning("No such channel: %r", channel)
            return
        am = self.kernel_manager.allowed_message_types
        mt = msg['header']['msg_type']
        if am and mt not in am:
            self.log.warning(
                'Received message of type "%s", which is not allowed. Ignoring.' % mt)
        else:
            stream = self.channels[channel]
            self.session.send(stream, msg)

    def _on_zmq_reply(self, stream, msg_list):
        idents, fed_msg_list = self.session.feed_identities(msg_list)
        msg = self.session.deserialize(fed_msg_list)
        parent = msg['parent_header']

        def write_stderr(error_message):
            self.log.warning(error_message)
            msg = self.session.msg("stream",
                                   content={"text": error_message + '\n',
                                            "name": "stderr"},
                                   parent=parent)
            msg['channel'] = 'iopub'
            self.write_message(json.dumps(msg, default=date_default))

        channel = getattr(stream, 'channel', None)
        msg_type = msg['header']['msg_type']

        if (channel == 'iopub' and msg_type == 'status' and
                msg['content'].get('execution_state') == 'idle'):
            # reset rate limit counter on status=idle,
            # to avoid 'Run All' hitting limits prematurely.
            self._iopub_window_byte_queue = []
            self._iopub_window_msg_count = 0
            self._iopub_window_byte_count = 0
            self._iopub_msgs_exceeded = False
            self._iopub_data_exceeded = False

        if channel == 'iopub' and msg_type not in {'status', 'comm_open',
                                                   'execute_input'}:
            # Remove the counts queued for removal.
            now = IOLoop.current().time()
            while len(self._iopub_window_byte_queue) > 0:
                queued = self._iopub_window_byte_queue[0]
                if now >= queued[0]:
                    self._iopub_window_byte_count -= queued[1]
                    self._iopub_window_msg_count -= 1
                    del self._iopub_window_byte_queue[0]
                else:
                    # This part of the queue hasn't been reached yet, so we
                    # can abort the loop.
                    break

            # Increment the bytes and message count
            self._iopub_window_msg_count += 1
            if msg_type == 'stream':
                byte_count = sum([len(x) for x in msg_list])
            else:
                byte_count = 0
            self._iopub_window_byte_count += byte_count

            # Queue a removal of the byte and message count for a time in
            # the future, when we are no longer interested in it.
            self._iopub_window_byte_queue.append(
                (now + self.rate_limit_window, byte_count))

            # Check the limits, set the limit flags, and reset the
            # message and data counts.
            msg_rate = float(self._iopub_window_msg_count) / self.rate_limit_window
            data_rate = float(self._iopub_window_byte_count) / self.rate_limit_window

            # Check the msg rate
            if self.iopub_msg_rate_limit > 0 and msg_rate > self.iopub_msg_rate_limit:
                if not self._iopub_msgs_exceeded:
                    self._iopub_msgs_exceeded = True
                    write_stderr(dedent("""\
                    IOPub message rate exceeded.
                    The Jupyter server will temporarily stop sending output
                    to the client in order to avoid crashing it.
                    To change this limit, set the config variable
                    `--ServerApp.iopub_msg_rate_limit`.

                    Current values:
                    ServerApp.iopub_msg_rate_limit={} (msgs/sec)
                    ServerApp.rate_limit_window={} (secs)
                    """.format(self.iopub_msg_rate_limit, self.rate_limit_window)))
            else:
                # resume once we've got some headroom below the limit
                if (self._iopub_msgs_exceeded and
                        msg_rate < (0.8 * self.iopub_msg_rate_limit)):
                    self._iopub_msgs_exceeded = False
                    if not self._iopub_data_exceeded:
                        self.log.warning("iopub messages resumed")

            # Check the data rate
            if self.iopub_data_rate_limit > 0 and data_rate > self.iopub_data_rate_limit:
                if not self._iopub_data_exceeded:
                    self._iopub_data_exceeded = True
                    write_stderr(dedent("""\
                    IOPub data rate exceeded.
                    The Jupyter server will temporarily stop sending output
                    to the client in order to avoid crashing it.
                    To change this limit, set the config variable
                    `--ServerApp.iopub_data_rate_limit`.

                    Current values:
                    ServerApp.iopub_data_rate_limit={} (bytes/sec)
                    ServerApp.rate_limit_window={} (secs)
                    """.format(self.iopub_data_rate_limit, self.rate_limit_window)))
            else:
                # resume once we've got some headroom below the limit
                if (self._iopub_data_exceeded and
                        data_rate < (0.8 * self.iopub_data_rate_limit)):
                    self._iopub_data_exceeded = False
                    if not self._iopub_msgs_exceeded:
                        self.log.warning("iopub messages resumed")

            # If either of the limit flags are set, do not send the message.
            if self._iopub_msgs_exceeded or self._iopub_data_exceeded:
                # we didn't send it, remove the current message from the calculus
                self._iopub_window_msg_count -= 1
                self._iopub_window_byte_count -= byte_count
                self._iopub_window_byte_queue.pop(-1)
                return
        super()._on_zmq_reply(stream, msg)

    def close(self):
        super().close()
        return self._close_future

    def on_close(self):
        self.log.debug("Websocket closed %s", self.session_key)
        # unregister myself as an open session (only if it's really me)
        if self._open_sessions.get(self.session_key) is self:
            self._open_sessions.pop(self.session_key)

        km = self.kernel_manager
        if self.kernel_id in km:
            km.notify_disconnect(self.kernel_id)
            km.remove_restart_callback(
                self.kernel_id, self.on_kernel_restarted,
            )
            km.remove_restart_callback(
                self.kernel_id, self.on_restart_failed, 'dead',
            )

            # start buffering instead of closing if this was the last connection
            if km._kernel_connections[self.kernel_id] == 0:
                km.start_buffering(self.kernel_id, self.session_key, self.channels)
                self._close_future.set_result(None)
                return

        # This method can be called twice, once by self.kernel_died and once
        # from the WebSocket close event. If the WebSocket connection is
        # closed before the ZMQ streams are setup, they could be None.
        for channel, stream in self.channels.items():
            if stream is not None and not stream.closed():
                stream.on_recv(None)
                stream.close()

        self.channels = {}
        self._close_future.set_result(None)

    def _send_status_message(self, status):
        iopub = self.channels.get('iopub', None)
        if iopub and not iopub.closed():
            # flush IOPub before sending a restarting/dead status message:
            # this ensures proper ordering on the IOPub channel, so that all
            # messages from the stopped kernel have been delivered
            iopub.flush()
        msg = self.session.msg("status", {'execution_state': status})
        msg['channel'] = 'iopub'
        self.write_message(json.dumps(msg, default=date_default))

    def on_kernel_restarted(self):
        logging.warning("kernel %s restarted", self.kernel_id)
        self._send_status_message('restarting')

    def on_restart_failed(self):
        logging.error("kernel %s restart failed!", self.kernel_id)
        self._send_status_message('dead')
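# A standalone hedged sketch (assumed, not from the original source) of the
# sliding-window accounting used by _on_zmq_reply above: each message queues
# its byte count with an expiry time, expired entries are dropped on the
# next message, and the rates compare the surviving counts against the
# window length.
import time

class SlidingWindowCounter:
    def __init__(self, window_secs=1.0):
        self.window = window_secs
        self._queue = []      # (expiry_time, byte_count) pairs
        self.msg_count = 0
        self.byte_count = 0

    def record(self, nbytes, now=None):
        now = time.monotonic() if now is None else now
        # drop counts whose window has expired
        while self._queue and now >= self._queue[0][0]:
            _, expired_bytes = self._queue.pop(0)
            self.byte_count -= expired_bytes
            self.msg_count -= 1
        self.msg_count += 1
        self.byte_count += nbytes
        self._queue.append((now + self.window, nbytes))

    @property
    def msg_rate(self):
        return self.msg_count / self.window

    @property
    def data_rate(self):
        return self.byte_count / self.window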
def get_zipped_content(camera_config: dict, media_type: str, group: str) -> typing.Awaitable:
    fut = Future()
    target_dir = camera_config.get('target_dir')

    if media_type == 'picture':
        exts = _PICTURE_EXTS
    elif media_type == 'movie':
        exts = _MOVIE_EXTS

    working = multiprocessing.Value('b')
    working.value = True

    # create a subprocess to add files to zip
    def do_zip(pipe):
        parent_pipe.close()

        mf = _list_media_files(target_dir, exts=exts, prefix=group)
        paths = []
        for (p, st) in mf:  # @UnusedVariable
            path = p[len(target_dir):]
            if path.startswith('/'):
                path = path[1:]
            paths.append(path)

        zip_filename = os.path.join(settings.MEDIA_PATH, '.zip-%s' % int(time.time()))
        logging.debug('adding %d files to zip file "%s"' % (len(paths), zip_filename))

        try:
            with zipfile.ZipFile(zip_filename, mode='w') as f:
                for path in paths:
                    full_path = os.path.join(target_dir, path)
                    f.write(full_path, path)
        except Exception as e:
            logging.error('failed to create zip file "%s": %s' % (zip_filename, e))
            working.value = False
            pipe.close()
            return

        logging.debug('reading zip file "%s" into memory' % zip_filename)
        try:
            with open(zip_filename, mode='rb') as f:
                data = f.read()

            working.value = False
            pipe.send(data)
            logging.debug('zip data ready')
        except Exception as e:
            logging.error('failed to read zip file "%s": %s' % (zip_filename, e))
            working.value = False
        finally:
            os.remove(zip_filename)
            pipe.close()

    logging.debug('starting zip process...')

    (parent_pipe, child_pipe) = multiprocessing.Pipe(duplex=False)
    process = multiprocessing.Process(target=do_zip, args=(child_pipe,))
    process.start()
    child_pipe.close()

    # poll the subprocess to see when it has finished
    started = datetime.datetime.now()

    def poll_process():
        io_loop = IOLoop.instance()
        if working.value:
            now = datetime.datetime.now()
            delta = now - started
            if delta.seconds < settings.ZIP_TIMEOUT:
                io_loop.add_timeout(datetime.timedelta(seconds=0.5), poll_process)
            else:  # process did not finish in time
                logging.error('timeout waiting for the zip process to finish')
                try:
                    os.kill(process.pid, signal.SIGTERM)
                except:
                    pass  # nevermind

                fut.set_result(None)
        else:  # finished
            try:
                data = parent_pipe.recv()
                logging.debug('zip process has returned %d bytes' % len(data))
            except:
                data = None

            fut.set_result(data)

    poll_process()

    return fut
def __init__(self, *args, **kwargs):
    Cursor.__init__(self, *args, **kwargs)
    self.new_response = Future()