class Client(object):
    """Blocking wrapper around ``AsyncClient``.

    Owns a private ``IOLoop`` that is started and stopped around each call,
    so the asynchronous client can be driven from synchronous code.
    """

    def __init__(self, host, port, timeout=None, connect_timeout=-1,
                 unix_socket=None, max_buffer_size=104857600):
        self._io_loop = IOLoop()
        self._async_client = AsyncClient(host, port, unix_socket,
                                         self._io_loop, timeout,
                                         connect_timeout, max_buffer_size)
        self._response = None
        self._closed = False

    def __del__(self):
        self.close()

    @property
    def closed(self):
        # Delegate to the wrapped asynchronous client.
        return self._async_client.closed

    def close(self):
        """Release the async client and the private loop (idempotent)."""
        if self._closed:
            return
        self._async_client.close()
        self._io_loop.close()
        self._closed = True

    def call(self, request):
        """Send *request* and block until its response arrives.

        Re-raises any error carried by the response via ``rethrow()``.
        """
        def on_response(resp):
            self._response = resp
            # Stop the private loop so the blocking start() below returns.
            self._io_loop.stop()

        self._async_client.call(request, on_response)
        self._io_loop.start()
        result, self._response = self._response, None
        result.rethrow()
        return result

    def __str__(self):
        return str(self._async_client)
def run_worker(q, ip, center_ip, center_port, ncores, nanny_port,
               local_dir, services):
    """ Function run by the Nanny when creating the worker

    Starts a Worker on a fresh IOLoop in this (child) process and reports
    either the bound port/directory or the startup exception back through *q*.
    """
    from distributed import Worker
    from tornado.ioloop import IOLoop
    # Discard any loop inherited from the parent and install a fresh one.
    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    worker = Worker(center_ip, center_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services)

    @gen.coroutine
    def start():
        try:
            yield worker._start()
        except Exception as e:
            logger.exception(e)
            # Hand the failure back to the parent via the queue.
            q.put(e)
        else:
            assert worker.port
            q.put({'port': worker.port, 'dir': worker.local_dir})

    loop.add_callback(start)
    try:
        loop.start()
    finally:
        # Close the loop and its file descriptors so the child process does
        # not leak resources when the loop stops (matches run_worker_fork).
        loop.stop()
        loop.close(all_fds=True)
def run_worker_fork(q, ip, scheduler_ip, scheduler_port, ncores, nanny_port,
                    worker_port, local_dir, services, name, memory_limit):
    """ Function run by the Nanny when creating the worker

    Runs in a forked child process: builds a fresh IOLoop, starts a Worker on
    *worker_port*, and reports the result (port/dir dict or the exception)
    back to the parent through *q*.
    """
    # Imported lazily because this function executes in the forked process.
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    # Discard any IOLoop inherited from the parent and make a new one current.
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(scheduler_ip, scheduler_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services, name=name, memory_limit=memory_limit,
                    loop=loop)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def start():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            # Report the startup failure back to the Nanny via the queue.
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            # Report the bound port and working directory on success.
            q.put({'port': worker.port, 'dir': worker.local_dir})  # pragma: no cover

    loop.add_callback(start)  # pragma: no cover
    try:
        loop.start()  # pragma: no cover
    finally:
        # Always tear the loop down so the forked process releases its fds.
        loop.stop()
        loop.close(all_fds=True)
def run_worker(q, ip, center_ip, center_port, ncores, nanny_port,
               worker_port, local_dir, services, name):
    """ Function run by the Nanny when creating the worker

    Starts a named Worker on *worker_port* against the given center and
    reports the result (port/dir dict or the exception) back through *q*.
    """
    from distributed import Worker  # pragma: no cover
    from tornado.ioloop import IOLoop  # pragma: no cover
    # Replace any loop inherited from the parent with a fresh one.
    IOLoop.clear_instance()  # pragma: no cover
    loop = IOLoop()  # pragma: no cover
    loop.make_current()  # pragma: no cover
    worker = Worker(center_ip, center_port, ncores=ncores, ip=ip,
                    service_ports={'nanny': nanny_port}, local_dir=local_dir,
                    services=services, name=name)  # pragma: no cover

    @gen.coroutine  # pragma: no cover
    def start():
        try:  # pragma: no cover
            yield worker._start(worker_port)  # pragma: no cover
        except Exception as e:  # pragma: no cover
            logger.exception(e)  # pragma: no cover
            # Hand the failure back to the parent via the queue.
            q.put(e)  # pragma: no cover
        else:
            assert worker.port  # pragma: no cover
            q.put({'port': worker.port, 'dir': worker.local_dir})  # pragma: no cover

    loop.add_callback(start)  # pragma: no cover
    # `ignoring` is presumably a suppress-style helper: let the child exit
    # quietly on Ctrl-C instead of raising through the loop.
    with ignoring(KeyboardInterrupt):
        loop.start()  # pragma: no cover
class SyncHTTPClientTest(unittest.TestCase):
    """Tests the blocking HTTPClient against a server on a background thread.

    The server runs on its own IOLoop in a separate thread while the
    synchronous client under test runs in the main (test) thread.
    """

    def setUp(self):
        self.server_ioloop = IOLoop()
        # Used to block setUp until the server socket is bound.
        event = threading.Event()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([("/", HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
            event.set()

        def start():
            # Bind first (run_sync), then serve until tearDown stops the loop.
            self.server_ioloop.run_sync(init_server)
            self.server_ioloop.start()

        self.server_thread = threading.Thread(target=start)
        self.server_thread.start()
        # Wait until the server is listening and self.port is set.
        event.wait()
        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        # Shutdown must be scheduled onto the server thread's loop.
        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        # Build an absolute URL for the test server.
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
def run(self):
    """Serve the websocket application on a private IOLoop (blocks)."""
    loop = IOLoop()
    # Make the new loop current *before* listen(): on Tornado 4.x,
    # Application.listen registers the server on IOLoop.current(), so
    # without this the server would be bound to a loop that never runs.
    loop.make_current()
    app = Application([
        (r'/', WsSocketHandler)
    ])
    app.listen(self.port)
    loop.start()
def test__yield_for_all_futures():
    """yield_for_all_futures resolves once the chained coroutine finishes."""
    io_loop = IOLoop()
    io_loop.make_current()

    @gen.coroutine
    def several_steps():
        # Accumulate three asynchronous values, one await at a time.
        total = 0
        for n in (1, 2, 3):
            total += yield async_value(n)
        raise gen.Return(total)

    outcome = {}

    def on_done(fut):
        outcome["value"] = fut.result()
        io_loop.stop()

    io_loop.add_future(yield_for_all_futures(several_steps()), on_done)

    try:
        io_loop.start()
    except KeyboardInterrupt:
        print("keyboard interrupt")

    assert outcome["value"] == 6

    io_loop.close()
class HTTPClient(object):
    """A blocking HTTP client.

    Implemented with a private IOLoop driving the asynchronous HTTP client;
    intended basically just for testing.
    """
    def __init__(self, async_client_class=None, **kwargs):
        # Each blocking client owns a loop that is started/stopped per fetch.
        self._io_loop = IOLoop()
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        self._async_client = async_client_class(self._io_loop, **kwargs)
        self._response = None
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Release the async client and the private loop (idempotent)."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Execute *request*, blocking until the response arrives.

        Re-raises any error carried by the response via ``rethrow()``.
        """
        def callback(response):
            self._response = response
            # Stop the private loop so the blocking start() below returns.
            self._io_loop.stop()
        self._async_client.fetch(request, callback, **kwargs)
        self._io_loop.start()
        response = self._response
        self._response = None
        response.rethrow()
        return response
class SocketServerThreadStarter(Thread):
    '''
    Used to fire up the three services each in its own thread.
    '''

    def __init__(self, socketServerClassName, port):
        '''
        Create one thread for one of the services to run in.
        @param socketServerClassName: Name of top level server class to run.
        @type socketServerClassName: string
        @param port: port to listen on
        @type port: int
        '''
        super(SocketServerThreadStarter, self).__init__()
        self.socketServerClassName = socketServerClassName
        self.port = port
        self.ioLoop = None

    def stop(self):
        self.ioLoop.stop()

    def _serve(self, serviceClass, logMessage):
        '''Common startup: log, listen, and run a private IOLoop until stopped.'''
        EchoTreeService.log(logMessage % self.port)
        http_server = serviceClass(serviceClass.handle_request)
        http_server.listen(self.port)
        self.ioLoop = IOLoop()
        self.ioLoop.start()
        self.ioLoop.close(all_fds=True)

    def run(self):
        '''
        Use the service name to instantiate the proper service, passing in the
        proper helper class.
        '''
        super(SocketServerThreadStarter, self).run()
        try:
            if self.socketServerClassName == 'RootWordSubmissionService':
                self._serve(RootWordSubmissionService,
                            "Starting EchoTree new tree submissions server %d: accepts word trees submitted from connecting clients.")
                return
            elif self.socketServerClassName == 'EchoTreeScriptRequestHandler':
                self._serve(EchoTreeScriptRequestHandler,
                            "Starting EchoTree script server %d: Returns one script that listens to the new-tree events in the browser.")
                return
            else:
                raise ValueError("Service class %s is unknown." % self.socketServerClassName)
        except Exception:
            # Typically an exception is caught here that complains about
            # 'socket in use'; the busy-socket condition times out within
            # ~30 seconds.
            # NOTE(review): the error (including the ValueError above) is
            # silently swallowed — the caller cannot tell startup failed.
            pass
        finally:
            if self.ioLoop is not None and self.ioLoop.running():
                self.ioLoop.stop()
            # NOTE(review): 'return' inside 'finally' also swallows any
            # in-flight BaseException (e.g. KeyboardInterrupt); kept as-is
            # to preserve the original behavior.
            return
class SocketServerThreadStarter(Thread):
    """Runs one of the EchoTree services on its own thread.

    Convenience for firing up various servers; currently not used.  In its
    current form it only knows how to start the service that distributes a
    JavaScript script subscribing to the EchoTree service.  Start the script
    server (EchoTreeScriptRequestHandler) in main() if this module is used
    stand-alone rather than from a browser-side script that already knows how
    to push new root words and subscribe to EchoTrees.
    """

    def __init__(self, socketServerClassName, port):
        """Prepare a thread for the named service.

        @param socketServerClassName: Name of top level server class to run.
        @type socketServerClassName: string
        @param port: port to listen on
        @type port: int
        """
        super(SocketServerThreadStarter, self).__init__()
        self.socketServerClassName = socketServerClassName
        self.port = port
        self.ioLoop = None

    def stop(self):
        self.ioLoop.stop()

    def run(self):
        """Instantiate the configured service and run its IOLoop."""
        super(SocketServerThreadStarter, self).run()
        try:
            if self.socketServerClassName != 'EchoTreeScriptRequestHandler':
                raise ValueError("Service class %s is unknown." % self.socketServerClassName)
            EchoTreeService.log("Starting EchoTree script server %d: Returns one script that listens to the new-tree events in the browser." % self.port)
            server = EchoTreeScriptRequestHandler(EchoTreeScriptRequestHandler.handle_request)
            server.listen(self.port)
            self.ioLoop = IOLoop()
            self.ioLoop.start()
            self.ioLoop.close(all_fds=True)
            return
        except Exception:
            # Typically a 'socket in use' error; deliberately swallowed so the
            # thread exits quietly (the busy socket times out within ~30 s).
            pass
        finally:
            if self.ioLoop is not None and self.ioLoop.running():
                self.ioLoop.stop()
            return
class TestGitHubParser(TestCase, TimeoutMixin):
    """Exercises GitHubParser and the GitHub HTML/Atom helper functions."""

    def setUp(self):
        # Private loop: the parser's async fetch runs on it until the
        # callback (or the timeout) stops it.
        self._ioloop = IOLoop()

    def test_github_parser(self):
        # NOTE(review): this test performs a live fetch from github.com.
        result = {}

        def callback(commit_result):
            # Copy the parsed commit fields out, then stop the loop so the
            # blocking start() below returns.
            for key in commit_result:
                result[key] = commit_result[key]
            self._ioloop.stop()

        github_parser = GitHubParser(self._ioloop, {})
        github_parser.parse("https://github.com/julython/"
                            "julythontweets/commit/25645d2cf6b58d2657cf6eb0fb4ca59d5f2499f4",
                            callback)
        self.add_timeout(3)
        self._ioloop.start()  # will block until callback or timeout
        self.assertEqual("Josh Marshall", result["author"]["name"])
        self.assertEqual("joshmarshall", result["author"]["username"])
        self.assertEqual(
            "25645d2cf6b58d2657cf6eb0fb4ca59d5f2499f4", result["commit"])

    def test_get_commit_atom_link(self):
        """Test extracting commit Atom URL from an HTML page."""
        with self.assertRaises(MissingAtomLink):
            get_commit_atom_link("WHATEVER")
        with self.assertRaises(MissingAtomLink):
            get_commit_atom_link("<html></html>")
        html = open(_GITHUB_SAMPLE_HTML_PATH).read()
        atom_link = get_commit_atom_link(html)
        self.assertEqual(
            "https://github.com/julython/julythontweets/commits/master.atom",
            atom_link)

    def test_get_most_recent_commit(self):
        """Test extracting most recent commit from an Atom Feed."""
        with self.assertRaises(MissingCommit):
            get_most_recent_commit("whatever")
        with self.assertRaises(MissingCommit):
            get_most_recent_commit("<rss></rss>")
        feed = open(_GITHUB_SAMPLE_ATOM_PATH).read()
        commit = get_most_recent_commit(feed)
        self.assertEqual({
            "name": "Josh Marshall",
            "url": "https://github.com/joshmarshall",
            "username": "******",
            "service": "github"
        }, commit["author"])
        self.assertEqual("25645d2cf6b58d2657cf6eb0fb4ca59d5f2499f4",
                         commit["commit"])
        self.assertEqual("julython/julythontweets", commit["project"]["id"])
        self.assertEqual("github", commit["project"]["service"])
class XDebugServer(TCPServer):
    """Class to listen for xdebug requests"""

    def __init__(self):
        """Constructor

        Starts a TCP server on port 9000 with a private IOLoop running on a
        daemon thread, so the (blocking) calling thread can exchange commands
        with the debugger connection through a pair of queues.
        """
        self.ioloop = IOLoop()
        super(XDebugServer, self).__init__(io_loop=self.ioloop)
        self.listen(9000)

        # this is for cross thread communication
        self.inport = Queue()
        self.outport = Queue()

        self._xdebug_connection = None

        def listenfunc():
            # Run the private loop on the listener thread until stop() is
            # called, then release its file descriptors.
            self.ioloop.make_current()
            self.ioloop.start()
            self.ioloop.close(all_fds=True)

        self.listener_thread = threading.Thread(target=listenfunc)
        self.listener_thread.daemon = True
        self.listener_thread.start()

    def handle_stream(self, stream, address):
        """Handle a connection

        Only one connection at a time, for now

        :stream: @todo
        :address: @todo
        :returns: @todo
        """
        self._xdebug_connection = XDebugConnection(self, stream, address)

    def run_command(self, command, data=None):
        """Send status

        :returns: @todo
        """
        # NOTE(review): `data` is accepted but never used — confirm intent.
        # Enqueue the NUL-terminated command, then block for the reply.
        self.inport.put("{} -i 1\0".format(str(command)))
        return self.outport.get()

    def stop(self):
        """Stop tornado event loop

        :returns: @todo
        """
        self.ioloop.stop()
        self.listener_thread.join()
        del self.ioloop
        del self.listener_thread
def run_server(port, daemon='start'):
    """Serve TestHandler on *port* with a fresh, current IOLoop (blocks)."""
    loop = IOLoop()
    loop.make_current()
    # Record the loop so it can be found (and stopped) from elsewhere.
    SERVER_LOOPS.append(loop)
    app = tornado.web.Application([(r"/", TestHandler, {'port': port})])
    app.listen(port)
    loop.start()
class TornadoFlask(Flask):
    """Flask application served by Tornado's HTTPServer on a private IOLoop."""

    def run(self, host='0.0.0.0', port=5000):
        """Serve this application (blocks until ``quit`` is called).

        @param host: interface to bind to
        @param port: TCP port to listen on
        """
        self.ioloop = IOLoop()
        # Wrap *this* Flask instance, not a module-level ``app`` global —
        # the global may not exist or may be a different application.
        http_server = HTTPServer(WSGIContainer(self), io_loop=self.ioloop)
        http_server.listen(port, host)
        self.ioloop.start()

    def quit(self):
        """Stop the loop started by ``run``."""
        self.ioloop.stop()
def run_nanny(port, center_port, **kwargs):
    """Run a distributed Nanny on 127.0.0.1:*port* against the given center."""
    from distributed import Nanny
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging

    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    # No-op periodic callback ticking every 500 ms.
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)

    nanny = Nanny('127.0.0.1', port, port + 1000, '127.0.0.1', center_port,
                  **kwargs)
    loop.run_sync(nanny._start)
    loop.start()
def run_center(port):
    """Run a distributed Center listening on *port* (blocks forever)."""
    from distributed import Center
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging

    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    # No-op periodic callback ticking every 500 ms.
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)

    center = Center('127.0.0.1', port)
    center.listen(port)
    loop.start()
def main():
    """Crawl the site list for bug patterns and write results to output.csv."""
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)

    loop = IOLoop()
    patterns = compile_regexes(import_bugs())
    site_list = import_sites("top100k")
    writer = csv.writer(open('output.csv', 'w'), lineterminator='\n')
    SitesCrawler(loop, 15, site_list, patterns, writer).run()
    loop.start()
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided for convenience and testing; most applications
    that are running an IOLoop will want to use `AsyncHTTPClient` instead.
    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            print("Error:", e)
    """
    def __init__(self, async_client_class=None, **kwargs):
        # Each blocking client owns a private IOLoop that is started and
        # stopped around every fetch.
        self._io_loop = IOLoop()
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        self._async_client = async_client_class(self._io_loop, **kwargs)
        self._response = None
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError`.
        """
        def callback(response):
            self._response = response
            # Stop the private loop so the blocking start() below returns.
            self._io_loop.stop()
        self._async_client.fetch(request, callback, **kwargs)
        self._io_loop.start()
        response = self._response
        self._response = None
        response.rethrow()
        return response
class FakeServerContext(object):
    """Context manager running a FakeAnacondaServer on a background thread.

    Entering starts the server thread, waits until its URL is known,
    monkeypatches the client config to point at it, and yields the URL.
    Exiting stops the loop (via add_callback) and joins the thread.
    """

    def __init__(self, monkeypatch, fail_these, expected_basename):
        self._monkeypatch = monkeypatch
        self._fail_these = fail_these
        self._expected_basename = expected_basename
        self._url = None
        self._loop = None
        # Condition used to hand the ready signal from the server thread
        # back to the entering thread.
        self._started = threading.Condition()
        self._thread = threading.Thread(target=self._run)

    def __exit__(self, type, value, traceback):
        if self._loop is not None:
            # we can ONLY use add_callback here, since the loop is
            # running in a different thread.
            self._loop.add_callback(self._stop)
        self._thread.join()

    def __enter__(self):
        self._started.acquire()
        self._thread.start()
        # Block until the server thread notifies that _url is populated.
        self._started.wait()
        self._started.release()
        _monkeypatch_client_config(self._monkeypatch, self._url)
        return self._url

    def _run(self):
        # Runs on the background thread: build loop + server, signal
        # readiness once the loop is actually processing callbacks.
        self._loop = IOLoop()
        self._server = FakeAnacondaServer(io_loop=self._loop,
                                          fail_these=self._fail_these,
                                          expected_basename=self._expected_basename)
        self._url = self._server.url

        def notify_started():
            self._started.acquire()
            self._started.notify()
            self._started.release()

        self._loop.add_callback(notify_started)
        self._loop.start()
        # done
        self._server.unlisten()

    def _stop(self):
        def really_stop():
            if self._loop is not None:
                self._loop.stop()
                self._loop = None
        # the delay allows pending next-tick things to go ahead
        # and happen, which may avoid some problems with trying to
        # output to stdout after pytest closes it
        if self._loop is not None:
            self._loop.call_later(delay=0.05, callback=really_stop)
def run_worker(port, center_port, **kwargs):
    """Run a distributed Worker on 127.0.0.1:*port* against the given center."""
    from distributed import Worker
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging

    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    # No-op periodic callback ticking every 500 ms.
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)

    w = Worker("127.0.0.1", port, "127.0.0.1", center_port, **kwargs)
    w.start()
    loop.start()
def test_io_loop(self):
    """A JoinableQueue bound to a custom IOLoop runs callbacks on that loop."""
    default_loop = self.io_loop
    private_loop = IOLoop()
    self.assertNotEqual(default_loop, private_loop)

    queue = toro.JoinableQueue(io_loop=private_loop)

    def on_got(future):
        assert future.result() == 'foo'
        private_loop.stop()

    queue.get().add_done_callback(on_got)
    queue.put('foo')
    private_loop.start()
def run_worker(q, center_port, **kwargs):
    """Run a Worker against the center; report its bound port via *q*."""
    from distributed import Worker
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging

    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        # No-op periodic callback ticking every 500 ms.
        PeriodicCallback(lambda: None, 500).start()
        logging.getLogger("tornado").setLevel(logging.CRITICAL)

        worker = Worker('127.0.0.1', center_port, ip='127.0.0.1', **kwargs)
        # Start on an OS-assigned port, publish it, then serve forever.
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        loop.start()
def _initialize(queue):
    """Thread target: create an IOLoop, publish it via *queue*, and run it.

    The loop instance (or the startup exception) is handed back to the
    spawning thread through *queue*.
    """
    result = None
    try:
        # create new IOLoop in the thread
        io_loop = IOLoop()
        # make it default for that thread
        io_loop.make_current()
        result = io_loop
        # Deliver the loop only once it is actually running callbacks.
        io_loop.add_callback(queue.put, result)
        io_loop.start()
    except Exception as err:  # pragma: no cover
        result = err
    finally:  # pragma: no cover
        # NOTE(review): this put also runs after a clean loop shutdown, i.e.
        # the loop may be enqueued a second time — confirm consumers expect
        # that.
        queue.put(result)
class TestIOLoopCurrent(unittest.TestCase):
    """IOLoop.current() inside a running loop returns that very loop."""

    def setUp(self):
        self.io_loop = IOLoop()

    def tearDown(self):
        self.io_loop.close()

    def test_current(self):
        def record_current():
            # Capture whatever current() reports while the loop is running.
            self.current_io_loop = IOLoop.current()
            self.io_loop.stop()

        self.io_loop.add_callback(record_current)
        self.io_loop.start()
        self.assertIs(self.current_io_loop, self.io_loop)
def pdf_capture(static_path, capture_server_class=None):
    """
    Starts a tornado server which serves all of the jupyter path locations
    as well as the working directory
    """
    settings = {
        "static_path": static_path
    }

    handlers = [
        (r"/(.*)", tornado.web.StaticFileHandler, {
            "path": settings['static_path']
        })
    ]

    # add the jupyter static paths
    for path in jupyter_path():
        handlers += [
            (r"/static/(.*)", tornado.web.StaticFileHandler, {
                "path": os.path.join(path, "static")
            })
        ]

    app = tornado.web.Application(handlers, **settings)

    if capture_server_class is None:
        server = CaptureServer(app)
    else:
        # Resolve a "module:Class" spec and instantiate the custom server.
        _module, _klass = capture_server_class.split(":")
        server = getattr(import_module(_module), _klass)(app)

    # can't pass this to the constructor for some reason...
    server.static_path = static_path

    # add the parsed, normalized notebook
    with open(os.path.join(static_path, "notebook.ipynb")) as fp:
        server.notebook = nbformat.read(fp, IPYNB_VERSION)

    ioloop = IOLoop()
    # server.capture will be called when the ioloop is bored for the first time
    ioloop.add_callback(server.capture)

    # connect to a port
    server.listen(PORT)

    try:
        # run forever
        ioloop.start()
    except KeyboardInterrupt:
        # this is probably not the best way to escape, but works for now
        print("Successfully created PDF")
def run(self, **kwargs):
    """ Start the tornado server, run forever.

    Tolerates the port already being in use (logs a warning instead of
    crashing); any other socket error is re-raised.
    """
    try:
        loop = IOLoop()
        app = self.make_app()
        app.listen(self.port)
        loop.start()
    except socket.error as serr:
        # Re-raise the socket error if not "[Errno 98] Address already in use"
        if serr.errno != errno.EADDRINUSE:
            # Bare raise preserves the original traceback (unlike `raise serr`).
            raise
        else:
            logger.warning('The webserver port {} is already used. May be the HttpRobotServer is already running or another software is using this port.'.format(self.port))
def run_worker(q, scheduler_port, **kwargs):
    """Run a validating Worker against the scheduler; report its port via *q*."""
    from distributed import Worker
    from tornado.ioloop import IOLoop, PeriodicCallback

    with log_errors():
        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        # No-op periodic callback ticking every 500 ms.
        PeriodicCallback(lambda: None, 500).start()

        worker = Worker('127.0.0.1', scheduler_port, ip='127.0.0.1',
                        loop=loop, validate=True, **kwargs)
        # Start on an OS-assigned port, publish it, then serve until stopped.
        loop.run_sync(lambda: worker._start(0))
        q.put(worker.port)
        try:
            loop.start()
        finally:
            loop.close(all_fds=True)
def run_scheduler(q, scheduler_port=0, **kwargs):
    """Run a distributed Scheduler, reporting its bound port via *q*.

    @param q: queue used to publish the scheduler's port to the parent
    @param scheduler_port: port to bind (0 = OS-assigned)
    """
    from distributed import Scheduler
    from tornado.ioloop import IOLoop, PeriodicCallback

    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    # No-op periodic callback ticking every 500 ms.
    PeriodicCallback(lambda: None, 500).start()

    scheduler = Scheduler(loop=loop, validate=True, **kwargs)
    # The return value of start() was bound to an unused local; dropped.
    scheduler.start(scheduler_port)
    q.put(scheduler.port)
    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
def run_worker(q, ip, port, center_ip, center_port, ncores, nanny_port):
    """ Function run by the Nanny when creating the worker """
    from distributed import Worker
    from tornado.ioloop import IOLoop

    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    worker = Worker(ip, port, center_ip, center_port, ncores,
                    nanny_port=nanny_port)

    @gen.coroutine
    def boot():
        # Start the worker, then publish its port back to the parent.
        yield worker._start()
        q.put(worker.port)

    loop.add_callback(boot)
    loop.start()
class Client(object):
    """Blocking STP client: wraps AsyncClient with a private IOLoop.

    The connection is (re)established lazily — ``call`` reconnects if the
    client was previously closed.
    """

    def __init__(self, host, port, timeout=None, connect_timeout=-1,
                 unix_socket=None, max_buffer_size=104857600):
        def connect():
            # (Re)create the loop and the async client with the captured
            # connection parameters.
            self._io_loop = IOLoop()
            self._async_client = AsyncClient(host, port, timeout,
                                             connect_timeout, unix_socket,
                                             self._io_loop, max_buffer_size)
            self._response = None
        # Keep the closure around so call() can reconnect after close().
        self._connect = connect
        self._response = None
        self._connect()

    @property
    def closed(self):
        # The async client is dropped (set to None) by close().
        return self._async_client is None

    def close(self):
        """Release the client and the loop; safe to call repeatedly."""
        if self.closed:
            return
        if self._async_client:
            self._async_client.close()
        self._io_loop.close()
        self._async_client = None
        self._io_loop = None
        self._response = None

    def call(self, request):
        """Send *request*, block for the response, and return it.

        On an error response the client is closed before re-raising, so the
        next call reconnects with a fresh connection.
        """
        if self.closed:
            self._connect()

        def callback(response):
            self._response = response
            # Stop the private loop so the blocking start() below returns.
            self._io_loop.stop()
        self._async_client.call(request, callback)
        self._io_loop.start()
        response = self._response
        self._response = None
        try:
            response.rethrow()
        except:
            # Tear down on any failure (including BaseException), re-raise.
            self.close()
            raise
        return response

    def __del__(self):
        self.close()

    def __str__(self):
        return 'STPClient to %s' % str(self._async_client.connection)
def test__ioloop_not_forcibly_stopped() -> None:
    # Issue #5494: stopping the Server must not stop the Tornado IO loop.
    app = Application()
    io_loop = IOLoop()
    io_loop.make_current()
    server = Server(app, io_loop=io_loop)
    server.start()
    observed = []

    def shut_down_server():
        server.unlisten()
        server.stop()
        # Were server.stop() to kill the loop, record() would never run and
        # `observed` would stay empty.
        io_loop.add_timeout(timedelta(seconds=0.01), record)

    def record():
        observed.append(None)
        io_loop.stop()

    io_loop.add_callback(shut_down_server)
    io_loop.start()
    assert observed == [None]
def run(self, loop=None):
    """Start servicing the Tornado event loop (blocks until interrupted).

    If no *loop* is supplied, a fresh IOLoop is created and made current.
    """
    if not loop:
        loop = IOLoop()
        loop.make_current()

    # Bind the listening socket before entering the loop.
    self.listen(self._port, self._address)
    logger.info('Pensive started on {}:{}'.format(self._address or '*', self._port))

    try:
        loop.start()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to shut the server down.
        pass

    loop.stop()
    loop.close()
    logger.info('Pensive stopped')
def run_center(q):
    """Run a distributed Center on 127.0.0.1, reporting its port via *q*."""
    from distributed import Center
    from tornado.ioloop import IOLoop, PeriodicCallback
    import logging

    IOLoop.clear_instance()
    loop = IOLoop()
    loop.make_current()
    # No-op periodic callback ticking every 500 ms.
    PeriodicCallback(lambda: None, 500).start()
    logging.getLogger("tornado").setLevel(logging.CRITICAL)

    center = Center('127.0.0.1')
    # Keep retrying until an OS-assigned port binds successfully.
    while True:
        try:
            center.listen(0)
        except Exception:
            logging.info("Could not start center on port. Retrying",
                         exc_info=True)
        else:
            break
    q.put(center.port)
    try:
        loop.start()
    finally:
        loop.close(all_fds=True)
class Snap(Fap):
    """Flask-based service letting one authorized Snap! client drive the
    LED matrix; served via Tornado in ``run``."""

    PLAYABLE = True
    ACTIVATED = False
    # Sentinel nickname meaning "no client is currently authorized".
    OFF = "turnoff"

    def __init__(self, username, userid):
        Fap.__init__(self, username, userid)
        self.flask = Flask(__name__)
        self.current_auth_nick = self.OFF
        # nickname -> {"appeared": timestamp, "last_seen": timestamp}
        self.nicknames = {}
        self.lock = RLock()
        CORS(self.flask)
        self.port = int(33450)
        self.loop = None
        self.route()

    def route(self):
        """Register the HTTP endpoints on the Flask app."""
        #self.flask.route('/set_pixel_rgb', methods=['POST'])(self.set_pixel_rgb)
        self.flask.route('/set_rgb_matrix', methods=['POST'])(self.set_rgb_matrix)
        self.flask.route('/is_authorized/<nickname>', methods=['GET'])(self.is_authorized)
        self.flask.route('/get_nickname', methods=['GET'])(self.get_nickname)
        self.flask.route('/clients', methods=['GET'])(self.get_clients)
        self.flask.route('/authorize', methods=['POST'])(self.authorize)

    def check_nicknames_validity(self):
        """Drop clients not seen for 20 s; deauthorize one if it was selected."""
        with self.lock:
            temp_dict = {}
            for nick, timestamps in self.nicknames.items():
                if time() - timestamps["last_seen"] < 20:
                    temp_dict[nick] = timestamps
                else:
                    if nick == self.current_auth_nick:
                        self.current_auth_nick = self.OFF
            self.nicknames = temp_dict

    @authentication_required
    def get_clients(self, user):
        """Admin-only: list known clients ordered by first appearance."""
        if not is_admin(user):
            abort(403, "Forbidden Bru")
        # update user table
        self.check_nicknames_validity()
        return dumps({
            "list_clients": sorted(self.nicknames.keys(),
                                   key=lambda x: self.nicknames[x]["appeared"]),
            "selected_client": self.current_auth_nick
        })

    @authentication_required
    def authorize(self, user):
        """Admin-only: select which client (or OFF) may draw on the matrix."""
        if not is_admin(user):
            abort(403, "Forbidden Bru")
        data = loads(request.get_data().decode())
        # {selected_client: ""}
        with self.lock:
            if "selected_client" in data and data["selected_client"] in [
                    self.OFF] + list(self.nicknames.keys()):
                self.current_auth_nick = data["selected_client"]
                self.erase_all()
                return dumps({
                    "success": True,
                    "message": "Client authorized successfully"
                })
        return dumps({"success": False, "message": "No such client"})

    @staticmethod
    def scale(v):
        # Map a 0-255 channel value onto 0.0-1.0, clamping out-of-range input.
        return min(1., max(0., float(v) / 255))

    def set_rgb_matrix(self):
        """Fill the 4x19 matrix row-by-row from 'nick:r:g:b:r:g:b:...' data."""
        data = request.get_data().decode().split(':')
        if data.pop(0) == self.current_auth_nick:
            nb_rows = 4
            nb_cols = 19
            r = 0
            c = 0
            with self.lock:
                # Consume channel triples, advancing column-first then row.
                while data:
                    red = data.pop(0)
                    green = data.pop(0)
                    blue = data.pop(0)
                    self.model.set_pixel(
                        r, c, list(map(self.scale, [red, green, blue])))
                    if c < nb_cols - 1:
                        c += 1
                    else:
                        c = 0
                        r += 1
                self.send_model()
            return 'OK'
        abort(403, "Snap client not authorized")

    def erase_all(self):
        """Black out the whole matrix and push the update."""
        with self.lock:
            self.model.set_all("black")
            self.send_model()
        return 'OK'

    def is_authorized(self, nickname):
        """Heartbeat endpoint: refresh last_seen and report authorization."""
        # NOTE(review): raises KeyError if *nickname* was never registered via
        # get_nickname (or has expired) — confirm clients always register first.
        with self.lock:
            self.nicknames[nickname]["last_seen"] = time()
            return str(nickname == self.current_auth_nick)

    def get_nickname(self):
        """Issue a unique random petname and register the client."""
        rand_id = petname.generate()
        with self.lock:
            while rand_id in self.nicknames.keys():
                rand_id = petname.generate()
            self.nicknames[rand_id] = {"appeared": time(), "last_seen": time()}
        return rand_id

    def run(self, params, expires_at=None):
        """Start the service: init the socket, clear the matrix, and serve the
        Flask app through Tornado (blocks)."""
        self.start_socket()
        from tornado.wsgi import WSGIContainer
        self.erase_all()
        self.loop = IOLoop()
        http_server = HTTPServer(WSGIContainer(self.flask))
        http_server.listen(self.port)
        self.loop.start()
class UIServer(threading.Thread):
    """Thread that serves the Unmanic web UI and API with Tornado."""

    config = None
    started = False
    io_loop = None
    server = None
    app = None

    def __init__(self, unmanic_data_queues, foreman, developer):
        super(UIServer, self).__init__(name='UIServer')
        self.config = config.Config()
        self.developer = developer
        self.data_queues = unmanic_data_queues
        self.logger = unmanic_data_queues["logging"].get_logger(self.name)
        self.inotifytasks = unmanic_data_queues["inotifytasks"]
        # TODO: Move all logic out of template calling to foreman.
        # Create methods here to handle the calls and rename to foreman
        self.foreman = foreman
        self.set_logging()
        # Add a singleton for handling the data queues for sending data to unmanic's other processes
        udq = UnmanicDataQueues()
        udq.set_unmanic_data_queues(unmanic_data_queues)
        urt = UnmanicRunningTreads()
        urt.set_unmanic_running_threads(
            {
                'foreman': foreman
            }
        )

    def _log(self, message, message2='', level="info"):
        # Route a formatted message to this thread's logger at the given level.
        message = common.format_message(message, message2)
        getattr(self.logger, level)(message)

    def stop(self):
        # May be called from another thread: the loop is asked to stop via
        # add_callback rather than stopped directly.
        if self.started:
            self.started = False
        if self.io_loop:
            self.io_loop.add_callback(self.io_loop.stop)

    def set_logging(self):
        """Attach a midnight-rotating file handler to tornado's loggers."""
        if self.config and self.config.get_log_path():
            # Create directory if not exists
            if not os.path.exists(self.config.get_log_path()):
                os.makedirs(self.config.get_log_path())

            # Create file handler
            log_file = os.path.join(self.config.get_log_path(), 'tornado.log')
            file_handler = logging.handlers.TimedRotatingFileHandler(log_file,
                                                                     when='midnight',
                                                                     interval=1,
                                                                     backupCount=7)
            file_handler.setLevel(logging.INFO)

            # Set tornado.access logging to file. Disable propagation of logs
            tornado_access = logging.getLogger("tornado.access")
            if self.developer:
                tornado_access.setLevel(logging.DEBUG)
            else:
                tornado_access.setLevel(logging.INFO)
            tornado_access.addHandler(file_handler)
            tornado_access.propagate = False

            # Set tornado.application logging to file. Enable propagation of logs
            tornado_application = logging.getLogger("tornado.application")
            if self.developer:
                tornado_application.setLevel(logging.DEBUG)
            else:
                tornado_application.setLevel(logging.INFO)
            tornado_application.addHandler(file_handler)
            tornado_application.propagate = True  # Send logs also to root logger (command line)

            # Set tornado.general logging to file. Enable propagation of logs
            tornado_general = logging.getLogger("tornado.general")
            if self.developer:
                tornado_general.setLevel(logging.DEBUG)
            else:
                tornado_general.setLevel(logging.INFO)
            tornado_general.addHandler(file_handler)
            tornado_general.propagate = True  # Send logs also to root logger (command line)

    def update_tornado_settings(self):
        # Check if this is a development environment or not
        if self.developer:
            tornado_settings['autoreload'] = True
            tornado_settings['serve_traceback'] = True

    def run(self):
        """Thread entry point: build the app and serve it until stop()."""
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        self.started = True

        # Configure tornado server based on config
        self.update_tornado_settings()

        # Load the app
        self.app = self.make_web_app()

        # TODO: add support for HTTPS

        # Web Server
        self.server = HTTPServer(
            self.app,
            ssl_options=None,
        )
        try:
            self.server.listen(int(self.config.get_ui_port()))
        except socket.error as e:
            self._log("Exception when setting WebUI port {}:".format(
                self.config.get_ui_port()), message2=str(e), level="warning")
            raise SystemExit

        # NOTE(review): IOLoop().current() first instantiates a brand-new
        # IOLoop and then asks for the current one — likely meant
        # IOLoop.current(); the extra instance is created needlessly.
        self.io_loop = IOLoop().current()
        self.io_loop.start()
        self.io_loop.close(True)

        self._log("Leaving UIServer loop...")

    def make_web_app(self):
        """Assemble the tornado Application with all routes and handlers."""
        # Start with web application routes
        from unmanic.webserver.websocket import UnmanicWebsocketHandler
        app = Application([
            (r"/unmanic/websocket", UnmanicWebsocketHandler),
            (r"/unmanic/downloads/(.*)", DownloadsHandler),
            (r"/(.*)", RedirectHandler, dict(
                url="/unmanic/ui/dashboard/"
            )),
        ], **tornado_settings)

        # Add API routes
        from unmanic.webserver.api_request_router import APIRequestRouter
        app.add_handlers(r'.*', [
            (
                PathMatches(r"/unmanic/api/.*"),
                APIRequestRouter(app)
            ),
        ])

        # Add frontend routes
        from unmanic.webserver.main import MainUIRequestHandler
        app.add_handlers(r'.*', [
            (r"/unmanic/css/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_css']
            )),
            (r"/unmanic/fonts/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_fonts']
            )),
            (r"/unmanic/icons/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_icons']
            )),
            (r"/unmanic/img/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_img']
            )),
            (r"/unmanic/js/(.*)", StaticFileHandler, dict(
                path=tornado_settings['static_js']
            )),
            (
                PathMatches(r"/unmanic/ui/(.*)"),
                MainUIRequestHandler,
            ),
        ])

        # Add widgets routes
        from unmanic.webserver.plugins import DataPanelRequestHandler
        from unmanic.webserver.plugins import PluginStaticFileHandler
        from unmanic.webserver.plugins import PluginAPIRequestHandler
        app.add_handlers(r'.*', [
            (
                PathMatches(r"/unmanic/panel/[^/]+(/(?!static/|assets$).*)?$"),
                DataPanelRequestHandler
            ),
            (
                PathMatches(r"/unmanic/plugin_api/[^/]+(/(?!static/|assets$).*)?$"),
                PluginAPIRequestHandler
            ),
            (r"/unmanic/panel/.*/static/(.*)", PluginStaticFileHandler, dict(
                path=tornado_settings['static_img']
            )),
        ])

        if self.developer:
            self._log("API Docs - Updating...", level="debug")
            try:
                from unmanic.webserver.api_v2.schema.swagger import generate_swagger_file
                errors = generate_swagger_file()
                for error in errors:
                    self._log(error, level="warn")
                else:
                    # for/else: runs once the loop completes without break.
                    self._log("API Docs - Updated successfully", level="debug")
            except Exception as e:
                self._log("Failed to reload API schema", message2=str(e),
                          level="error")

        # Start the Swagger UI. Automatically generated swagger.json can also
        # be served using a separate Swagger-service.
        from swagger_ui import tornado_api_doc
        tornado_api_doc(
            app,
            config_path=os.path.join(os.path.dirname(__file__), "..",
                                     "webserver", "docs", "api_schema_v2.json"),
            url_prefix="/unmanic/swagger",
            title="Unmanic application API"
        )

        return app
class WebServer(threading.Thread):
    """Daemon thread running the sickrage tornado web UI and API server."""

    def __init__(self):
        super(WebServer, self).__init__()
        self.name = "TORNADO"
        self.daemon = True
        self.started = False
        self.video_root = None   # filesystem root used by the /videos/ static handler
        self.api_root = None     # '<web_root>/api/<api_key>' prefix
        self.app = None
        self.server = None
        self.io_loop = None

    def run(self):
        """Thread entry point: configure tornado, register all handlers and
        block in the IOLoop until :meth:`shutdown` stops it.

        Raises SystemExit if the configured web port cannot be bound.
        """
        self.started = True
        self.io_loop = IOLoop()

        # load languages
        tornado.locale.load_gettext_translations(sickrage.LOCALE_DIR, 'messages')

        # Check configured web port is correct
        if sickrage.app.config.web_port < 21 or sickrage.app.config.web_port > 65535:
            sickrage.app.config.web_port = 8081

        # clear mako cache folder
        mako_cache = os.path.join(sickrage.app.cache_dir, 'mako')
        if os.path.isdir(mako_cache):
            shutil.rmtree(mako_cache, ignore_errors=True)

        # video root
        if sickrage.app.config.root_dirs:
            root_dirs = sickrage.app.config.root_dirs.split('|')
            self.video_root = root_dirs[int(root_dirs[0]) + 1]

        # web root — normalize to a single leading slash, no trailing slash.
        # BUGFIX: was a duplicated `x = x = value` chained assignment.
        if sickrage.app.config.web_root:
            sickrage.app.config.web_root = (
                '/' + sickrage.app.config.web_root.lstrip('/').strip('/'))

        # Hoist the repeated config lookups (behavior unchanged; web_root is
        # final at this point).
        web_root = sickrage.app.config.web_root
        gui_static = sickrage.app.config.gui_static_dir

        # api root
        self.api_root = r'%s/api/%s' % (web_root, sickrage.app.config.api_key)

        # tornado setup
        if sickrage.app.config.enable_https:
            # If either the HTTPS certificate or key do not exist, make some self-signed ones.
            if not create_https_certificates(sickrage.app.config.https_cert, sickrage.app.config.https_key):
                sickrage.app.log.info("Unable to create CERT/KEY files, disabling HTTPS")
                sickrage.app.config.enable_https = False

            if not (os.path.exists(sickrage.app.config.https_cert) and os.path.exists(sickrage.app.config.https_key)):
                sickrage.app.log.warning("Disabled HTTPS because of missing CERT and KEY files")
                sickrage.app.config.enable_https = False

        # Load templates
        mako_lookup = TemplateLookup(
            directories=[sickrage.app.config.gui_views_dir],
            module_directory=os.path.join(sickrage.app.cache_dir, 'mako'),
            filesystem_checks=True,
            strict_undefined=True,
            input_encoding='utf-8',
            output_encoding='utf-8',
            encoding_errors='replace'
        )

        # Pre-compile every template under the views dir, keyed by its
        # view-dir-relative path.
        templates = {}
        for root, dirs, files in os.walk(sickrage.app.config.gui_views_dir):
            path = root.split(os.sep)
            for x in sickrage.app.config.gui_views_dir.split(os.sep):
                if x in path:
                    del path[path.index(x)]
            for file in files:
                filename = '{}/{}'.format('/'.join(path), file).lstrip('/')
                templates[filename] = mako_lookup.get_template(filename)

        # Load the app
        self.app = Application(
            debug=True,
            autoreload=False,
            gzip=sickrage.app.config.web_use_gzip,
            cookie_secret=sickrage.app.config.web_cookie_secret,
            login_url='%s/login/' % web_root,
            templates=templates,
            default_handler_class=NotFoundHandler
        )

        # Websocket handler
        self.app.add_handlers('.*$', [
            (r'%s/ws/ui' % web_root, WebSocketUIHandler)
        ])

        # Static File Handlers
        self.app.add_handlers('.*$', [
            # api
            (r'%s/api/(\w{32})(/?.*)' % web_root, ApiHandler),
            # redirect to home
            (r"(%s)(/?)" % web_root, RedirectHandler,
             {"url": "%s/home" % web_root}),
            # api builder
            (r'%s/api/builder' % web_root, RedirectHandler,
             {"url": web_root + '/apibuilder/'}),
            # login
            (r'%s/login(/?)' % web_root, LoginHandler),
            # logout
            (r'%s/logout(/?)' % web_root, LogoutHandler),
            # favicon
            (r'%s/(favicon\.ico)' % web_root, StaticNoCacheFileHandler,
             {"path": os.path.join(gui_static, 'images/favicon.ico')}),
            # images
            (r'%s/images/(.*)' % web_root, StaticImageHandler,
             {"path": os.path.join(gui_static, 'images')}),
            # css
            (r'%s/css/(.*)' % web_root, StaticNoCacheFileHandler,
             {"path": os.path.join(gui_static, 'css')}),
            # scss
            (r'%s/scss/(.*)' % web_root, StaticNoCacheFileHandler,
             {"path": os.path.join(gui_static, 'scss')}),
            # fonts
            (r'%s/fonts/(.*)' % web_root, StaticNoCacheFileHandler,
             {"path": os.path.join(gui_static, 'fonts')}),
            # javascript
            (r'%s/js/(.*)' % web_root, StaticNoCacheFileHandler,
             {"path": os.path.join(gui_static, 'js')}),
            # videos
            (r'%s/videos/(.*)' % web_root, StaticNoCacheFileHandler,
             {"path": self.video_root}),
        ])

        # Handlers
        self.app.add_handlers('.*$', [
            (r'%s/robots.txt' % web_root, RobotsDotTxtHandler),
            (r'%s/messages.po' % web_root, MessagesDotPoHandler),
            (r'%s/quicksearch.json' % web_root, QuicksearchDotJsonHandler),
            (r'%s/apibuilder(/?)' % web_root, APIBulderHandler),
            (r'%s/setHomeLayout(/?)' % web_root, SetHomeLayoutHandler),
            (r'%s/setPosterSortBy(/?)' % web_root, SetPosterSortByHandler),
            (r'%s/setPosterSortDir(/?)' % web_root, SetPosterSortDirHandler),
            (r'%s/setHistoryLayout(/?)' % web_root, SetHistoryLayoutHandler),
            (r'%s/toggleDisplayShowSpecials(/?)' % web_root, ToggleDisplayShowSpecialsHandler),
            (r'%s/toggleScheduleDisplayPaused(/?)' % web_root, ToggleScheduleDisplayPausedHandler),
            (r'%s/setScheduleSort(/?)' % web_root, SetScheduleSortHandler),
            (r'%s/forceSchedulerJob(/?)' % web_root, ForceSchedulerJobHandler),
            (r'%s/announcements(/?)' % web_root, AnnouncementsHandler),
            (r'%s/announcements/announcementCount(/?)' % web_root, AnnouncementCountHandler),
            (r'%s/announcements/mark-seen(/?)' % web_root, MarkAnnouncementSeenHandler),
            (r'%s/schedule(/?)' % web_root, ScheduleHandler),
            (r'%s/setScheduleLayout(/?)' % web_root, SetScheduleLayoutHandler),
            (r'%s/calendar(/?)' % web_root, CalendarHandler),
            (r'%s/changelog(/?)' % web_root, ChangelogHandler),
            (r'%s/account/link(/?)' % web_root, AccountLinkHandler),
            (r'%s/account/unlink(/?)' % web_root, AccountUnlinkHandler),
            (r'%s/account/is-linked(/?)' % web_root, AccountIsLinkedHandler),
            (r'%s/history(/?)' % web_root, HistoryHandler),
            (r'%s/history/clear(/?)' % web_root, HistoryClearHandler),
            (r'%s/history/trim(/?)' % web_root, HistoryTrimHandler),
            (r'%s/irc(/?)' % web_root, IRCHandler),
            (r'%s/logs(/?)' % web_root, LogsHandler),
            (r'%s/logs/errorCount(/?)' % web_root, ErrorCountHandler),
            (r'%s/logs/warningCount(/?)' % web_root, WarningCountHandler),
            (r'%s/logs/view(/?)' % web_root, LogsViewHandler),
            (r'%s/logs/clearAll(/?)' % web_root, LogsClearAllHanlder),
            (r'%s/logs/clearWarnings(/?)' % web_root, LogsClearWarningsHanlder),
            (r'%s/logs/clearErrors(/?)' % web_root, LogsClearErrorsHanlder),
            (r'%s/browser(/?)' % web_root, WebFileBrowserHandler),
            (r'%s/browser/complete(/?)' % web_root, WebFileBrowserCompleteHandler),
            (r'%s/home(/?)' % web_root, HomeHandler),
            (r'%s/home/showProgress(/?)' % web_root, ShowProgressHandler),
            (r'%s/home/is-alive(/?)' % web_root, IsAliveHandler),
            (r'%s/home/testSABnzbd(/?)' % web_root, TestSABnzbdHandler),
            (r'%s/home/testSynologyDSM(/?)' % web_root, TestSynologyDSMHandler),
            (r'%s/home/testTorrent(/?)' % web_root, TestTorrentHandler),
            (r'%s/home/testFreeMobile(/?)' % web_root, TestFreeMobileHandler),
            (r'%s/home/testTelegram(/?)' % web_root, TestTelegramHandler),
            (r'%s/home/testJoin(/?)' % web_root, TestJoinHandler),
            (r'%s/home/testGrowl(/?)' % web_root, TestGrowlHandler),
            (r'%s/home/testProwl(/?)' % web_root, TestProwlHandler),
            (r'%s/home/testBoxcar2(/?)' % web_root, TestBoxcar2Handler),
            (r'%s/home/testPushover(/?)' % web_root, TestPushoverHandler),
            (r'%s/home/twitterStep1(/?)' % web_root, TwitterStep1Handler),
            (r'%s/home/twitterStep2(/?)' % web_root, TwitterStep2Handler),
            (r'%s/home/testTwitter(/?)' % web_root, TestTwitterHandler),
            (r'%s/home/testTwilio(/?)' % web_root, TestTwilioHandler),
            (r'%s/home/testSlack(/?)' % web_root, TestSlackHandler),
            (r'%s/home/testAlexa(/?)' % web_root, TestAlexaHandler),
            (r'%s/home/testDiscord(/?)' % web_root, TestDiscordHandler),
            (r'%s/home/testKODI(/?)' % web_root, TestKODIHandler),
            (r'%s/home/testPMC(/?)' % web_root, TestPMCHandler),
            (r'%s/home/testPMS(/?)' % web_root, TestPMSHandler),
            (r'%s/home/testLibnotify(/?)' % web_root, TestLibnotifyHandler),
            (r'%s/home/testEMBY(/?)' % web_root, TestEMBYHandler),
            (r'%s/home/testNMJ(/?)' % web_root, TestNMJHandler),
            (r'%s/home/settingsNMJ(/?)' % web_root, SettingsNMJHandler),
            (r'%s/home/testNMJv2(/?)' % web_root, TestNMJv2Handler),
            (r'%s/home/settingsNMJv2(/?)' % web_root, SettingsNMJv2Handler),
            (r'%s/home/getTraktToken(/?)' % web_root, GetTraktTokenHandler),
            (r'%s/home/testTrakt(/?)' % web_root, TestTraktHandler),
            (r'%s/home/loadShowNotifyLists(/?)' % web_root, LoadShowNotifyListsHandler),
            (r'%s/home/saveShowNotifyList(/?)' % web_root, SaveShowNotifyListHandler),
            (r'%s/home/testEmail(/?)' % web_root, TestEmailHandler),
            (r'%s/home/testNMA(/?)' % web_root, TestNMAHandler),
            (r'%s/home/testPushalot(/?)' % web_root, TestPushalotHandler),
            (r'%s/home/testPushbullet(/?)' % web_root, TestPushbulletHandler),
            (r'%s/home/getPushbulletDevices(/?)' % web_root, GetPushbulletDevicesHandler),
            (r'%s/home/serverStatus(/?)' % web_root, ServerStatusHandler),
            (r'%s/home/providerStatus(/?)' % web_root, ProviderStatusHandler),
            (r'%s/home/shutdown(/?)' % web_root, ShutdownHandler),
            (r'%s/home/restart(/?)' % web_root, RestartHandler),
            (r'%s/home/updateCheck(/?)' % web_root, UpdateCheckHandler),
            (r'%s/home/update(/?)' % web_root, UpdateHandler),
            (r'%s/home/verifyPath(/?)' % web_root, VerifyPathHandler),
            (r'%s/home/installRequirements(/?)' % web_root, InstallRequirementsHandler),
            (r'%s/home/branchCheckout(/?)' % web_root, BranchCheckoutHandler),
            (r'%s/home/displayShow(/?)' % web_root, DisplayShowHandler),
            (r'%s/home/togglePause(/?)' % web_root, TogglePauseHandler),
            (r'%s/home/deleteShow' % web_root, DeleteShowHandler),
            (r'%s/home/refreshShow(/?)' % web_root, RefreshShowHandler),
            (r'%s/home/updateShow(/?)' % web_root, UpdateShowHandler),
            (r'%s/home/subtitleShow(/?)' % web_root, SubtitleShowHandler),
            (r'%s/home/updateKODI(/?)' % web_root, UpdateKODIHandler),
            (r'%s/home/updatePLEX(/?)' % web_root, UpdatePLEXHandler),
            (r'%s/home/updateEMBY(/?)' % web_root, UpdateEMBYHandler),
            (r'%s/home/syncTrakt(/?)' % web_root, SyncTraktHandler),
            (r'%s/home/deleteEpisode(/?)' % web_root, DeleteEpisodeHandler),
            (r'%s/home/testRename(/?)' % web_root, TestRenameHandler),
            (r'%s/home/doRename(/?)' % web_root, DoRenameHandler),
            (r'%s/home/searchEpisode(/?)' % web_root, SearchEpisodeHandler),
            (r'%s/home/getManualSearchStatus(/?)' % web_root, GetManualSearchStatusHandler),
            (r'%s/home/searchEpisodeSubtitles(/?)' % web_root, SearchEpisodeSubtitlesHandler),
            (r'%s/home/setSceneNumbering(/?)' % web_root, SetSceneNumberingHandler),
            (r'%s/home/retryEpisode(/?)' % web_root, RetryEpisodeHandler),
            (r'%s/home/fetch_releasegroups(/?)' % web_root, FetchReleasegroupsHandler),
            (r'%s/home/postprocess(/?)' % web_root, HomePostProcessHandler),
            (r'%s/home/postprocess/processEpisode(/?)' % web_root, HomeProcessEpisodeHandler),
            (r'%s/home/addShows(/?)' % web_root, HomeAddShowsHandler),
            (r'%s/home/addShows/searchIndexersForShowName(/?)' % web_root, SearchIndexersForShowNameHandler),
            (r'%s/home/addShows/massAddTable(/?)' % web_root, MassAddTableHandler),
            (r'%s/home/addShows/newShow(/?)' % web_root, NewShowHandler),
            (r'%s/home/addShows/traktShows(/?)' % web_root, TraktShowsHandler),
            (r'%s/home/addShows/popularShows(/?)' % web_root, PopularShowsHandler),
            (r'%s/home/addShows/addShowToBlacklist(/?)' % web_root, AddShowToBlacklistHandler),
            (r'%s/home/addShows/existingShows(/?)' % web_root, ExistingShowsHandler),
            (r'%s/home/addShows/addShowByID(/?)' % web_root, AddShowByIDHandler),
            (r'%s/home/addShows/addNewShow(/?)' % web_root, AddNewShowHandler),
            (r'%s/home/addShows/addExistingShows(/?)' % web_root, AddExistingShowsHandler),
            (r'%s/manage(/?)' % web_root, ManageHandler),
            (r'%s/manage/editShow(/?)' % web_root, EditShowHandler),
            (r'%s/manage/showEpisodeStatuses(/?)' % web_root, ShowEpisodeStatusesHandler),
            (r'%s/manage/episodeStatuses(/?)' % web_root, EpisodeStatusesHandler),
            (r'%s/manage/changeEpisodeStatuses(/?)' % web_root, ChangeEpisodeStatusesHandler),
            (r'%s/manage/setEpisodeStatus(/?)' % web_root, SetEpisodeStatusHandler),
            (r'%s/manage/showSubtitleMissed(/?)' % web_root, ShowSubtitleMissedHandler),
            (r'%s/manage/subtitleMissed(/?)' % web_root, SubtitleMissedHandler),
            (r'%s/manage/downloadSubtitleMissed(/?)' % web_root, DownloadSubtitleMissedHandler),
            (r'%s/manage/backlogShow(/?)' % web_root, BacklogShowHandler),
            (r'%s/manage/backlogOverview(/?)' % web_root, BacklogOverviewHandler),
            (r'%s/manage/massEdit(/?)' % web_root, MassEditHandler),
            (r'%s/manage/massUpdate(/?)' % web_root, MassUpdateHandler),
            (r'%s/manage/failedDownloads(/?)' % web_root, FailedDownloadsHandler),
            (r'%s/manage/manageQueues(/?)' % web_root, ManageQueuesHandler),
            (r'%s/manage/manageQueues/forceBacklogSearch(/?)' % web_root, ForceBacklogSearchHandler),
            (r'%s/manage/manageQueues/forceDailySearch(/?)' % web_root, ForceDailySearchHandler),
            (r'%s/manage/manageQueues/forceFindPropers(/?)' % web_root, ForceFindPropersHandler),
            (r'%s/manage/manageQueues/pauseDailySearcher(/?)' % web_root, PauseDailySearcherHandler),
            (r'%s/manage/manageQueues/pauseBacklogSearcher(/?)' % web_root, PauseBacklogSearcherHandler),
            (r'%s/manage/manageQueues/pausePostProcessor(/?)' % web_root, PausePostProcessorHandler),
            (r'%s/config(/?)' % web_root, ConfigHandler),
            (r'%s/config/reset(/?)' % web_root, ConfigResetHandler),
            (r'%s/config/anime(/?)' % web_root, ConfigAnimeHandler),
            (r'%s/config/anime/saveAnime(/?)' % web_root, ConfigSaveAnimeHandler),
            (r'%s/config/backuprestore(/?)' % web_root, ConfigBackupRestoreHandler),
            (r'%s/config/backuprestore/backup(/?)' % web_root, ConfigBackupHandler),
            (r'%s/config/backuprestore/restore(/?)' % web_root, ConfigRestoreHandler),
            (r'%s/config/backuprestore/saveBackupRestore(/?)' % web_root, SaveBackupRestoreHandler),
            (r'%s/config/general(/?)' % web_root, ConfigGeneralHandler),
            (r'%s/config/general/generateApiKey(/?)' % web_root, GenerateApiKeyHandler),
            (r'%s/config/general/saveRootDirs(/?)' % web_root, SaveRootDirsHandler),
            (r'%s/config/general/saveAddShowDefaults(/?)' % web_root, SaveAddShowDefaultsHandler),
            (r'%s/config/general/saveGeneral(/?)' % web_root, SaveGeneralHandler),
            (r'%s/config/notifications(/?)' % web_root, ConfigNotificationsHandler),
            (r'%s/config/notifications/saveNotifications(/?)' % web_root, SaveNotificationsHandler),
            (r'%s/config/postProcessing(/?)' % web_root, ConfigPostProcessingHandler),
            (r'%s/config/postProcessing/savePostProcessing(/?)' % web_root, SavePostProcessingHandler),
            (r'%s/config/postProcessing/testNaming(/?)' % web_root, TestNamingHandler),
            (r'%s/config/postProcessing/isNamingValid(/?)' % web_root, IsNamingPatternValidHandler),
            (r'%s/config/postProcessing/isRarSupported(/?)' % web_root, IsRarSupportedHandler),
            (r'%s/config/providers(/?)' % web_root, ConfigProvidersHandler),
            (r'%s/config/providers/canAddNewznabProvider(/?)' % web_root, CanAddNewznabProviderHandler),
            (r'%s/config/providers/canAddTorrentRssProvider(/?)' % web_root, CanAddTorrentRssProviderHandler),
            (r'%s/config/providers/getNewznabCategories(/?)' % web_root, GetNewznabCategoriesHandler),
            (r'%s/config/providers/saveProviders(/?)' % web_root, SaveProvidersHandler),
            (r'%s/config/qualitySettings(/?)' % web_root, ConfigQualitySettingsHandler),
            (r'%s/config/qualitySettings/saveQualities(/?)' % web_root, SaveQualitiesHandler),
            (r'%s/config/search(/?)' % web_root, ConfigSearchHandler),
            (r'%s/config/search/saveSearch(/?)' % web_root, SaveSearchHandler),
            (r'%s/config/subtitles(/?)' % web_root, ConfigSubtitlesHandler),
            (r'%s/config/subtitles/get_code(/?)' % web_root, ConfigSubtitleGetCodeHandler),
            (r'%s/config/subtitles/wanted_languages(/?)' % web_root, ConfigSubtitlesWantedLanguagesHandler),
            (r'%s/config/subtitles/saveSubtitles(/?)' % web_root, SaveSubtitlesHandler),
        ])

        # HTTPS Cert/Key object
        ssl_ctx = None
        if sickrage.app.config.enable_https:
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(sickrage.app.config.https_cert, sickrage.app.config.https_key)

        # Web Server
        self.server = HTTPServer(self.app, ssl_options=ssl_ctx,
                                 xheaders=sickrage.app.config.handle_reverse_proxy)
        try:
            self.server.listen(sickrage.app.config.web_port)
        except socket.error as e:
            sickrage.app.log.warning(e.strerror)
            raise SystemExit

        self.io_loop.start()

    def shutdown(self):
        """Stop the HTTP server and the IOLoop (thread-safe best effort)."""
        if self.started:
            self.started = False
            if self.server:
                self.server.close_all_connections()
                self.server.stop()
            if self.io_loop:
                self.io_loop.stop()
class CompatibilityTests(unittest.TestCase):
    """Integration tests pairing a tornado IOLoop with a Twisted reactor.

    Each test matrix cell combines a server (tornado or twisted) with a
    client (tornado or twisted), driven either by the IOLoop or by the
    reactor wrapped around it (TornadoReactor).
    """

    def setUp(self):
        # A fresh IOLoop per test, with the Twisted reactor layered on top of it.
        self.io_loop = IOLoop()
        self.reactor = TornadoReactor(self.io_loop)

    def tearDown(self):
        # Drop reactor connections first, then release the loop's fds.
        self.reactor.disconnectAll()
        self.io_loop.close(all_fds=True)

    def start_twisted_server(self):
        """Listen with a one-page Twisted Site on an unused local port."""
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return "Hello from twisted!"
        site = Site(HelloResource())
        self.twisted_port = get_unused_port()
        self.reactor.listenTCP(self.twisted_port, site, interface='127.0.0.1')

    def start_tornado_server(self):
        """Listen with a one-page tornado Application on an unused local port."""
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")
        # log_function silenced to keep test output clean.
        app = Application([('/', HelloHandler)], log_function=lambda x: None)
        self.tornado_port = get_unused_port()
        app.listen(self.tornado_port, address='127.0.0.1', io_loop=self.io_loop)

    def run_ioloop(self):
        """Drive the test via the tornado IOLoop; fire reactor shutdown after."""
        self.stop_loop = self.io_loop.stop
        self.io_loop.start()
        self.reactor.fireSystemEvent('shutdown')

    def run_reactor(self):
        """Drive the test via the Twisted reactor."""
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        """Fetch *url* with tornado's AsyncHTTPClient under *runner*; return the response."""
        responses = []
        client = AsyncHTTPClient(self.io_loop)

        def callback(response):
            responses.append(response)
            self.stop_loop()
        client.fetch(url, callback=callback)
        runner()
        self.assertEqual(len(responses), 1)
        # Re-raise any transport/HTTP error captured in the response.
        responses[0].rethrow()
        return responses[0]

    def twisted_fetch(self, url, runner):
        """Fetch *url* with Twisted's Agent under *runner*; return the body string."""
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request('GET', url)

        class Accumulator(Protocol):
            # Collects body chunks; fires `finished` when the connection closes.
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished
        d.addCallback(callback)

        def shutdown(ignored):
            # Stop whichever loop `runner` started (success or failure).
            self.stop_loop()
        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return ''.join(chunks)

    def testTwistedServerTornadoClientIOLoop(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://localhost:%d' % self.twisted_port, self.run_ioloop)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://localhost:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTornadoServerTwistedClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://localhost:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://localhost:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, 'Hello from tornado!')
class Core(object):
    """SiCKRAGE application core: owns configuration, databases, the
    scheduler, worker queues and the web server, and runs the main IOLoop.
    """

    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop()
        self.pid = os.getpid()
        self.showlist = []

        # NOTE(review): on non-Windows dateutil builds `tz.tzwinlocal` may not
        # exist at all — this expression assumes the attribute is present.
        self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quite = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        # printf-style episode naming patterns (parallel to naming_ep_type_text)
        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")

        # BUGFIX: the last entry was "S%(seasonnumber)02 dE%(episodenumber)02d",
        # an invalid %-format spec ("02 " has no conversion character) that
        # raises ValueError when formatted; corrected to match the "S01 E02"
        # pattern used in naming_ep_type.
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")

        self.naming_ep_type_text = (
            "1x02",
            "s01e02",
            "S01E02",
            "01x02",
            "S01 E02",
        )
        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [
            language for language in os.listdir(sickrage.LOCALE_DIR)
            if '_' in language
        ]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        # Components created lazily in start()
        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        # CONSISTENCY FIX: declared here like every sibling component; it was
        # only ever assigned inside start() before.
        self.failed_snatch_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        """Bring the whole application up: load config, prepare databases,
        register scheduler jobs, start queues and the web server, then block
        in the IOLoop. Returns early if free disk space is below 100MB.
        """
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id='sickrage-app',
            client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                # keep a timestamped backup of the existing sickrage.db
                helpers.moveFile(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.moveFile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()
            # check integrity of database
            db.check_integrity()
            # migrate database
            db.migrate()
            # misc database cleanups
            db.cleanup()
            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username,
                                          self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission',
                                              'deluge', 'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        # clamp all scheduler frequencies to their configured minimums
        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name=self.version_updater.name,
            id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # start scheduler service
        self.scheduler.start()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        """Tear everything down in reverse order; optionally re-exec for restart."""
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug(
                        "Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        self.io_loop.stop()

    def save_all(self):
        """Persist every loaded show and the current configuration."""
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist and quicksearch cache with shows and episodes
        from the database
        """
        self.quicksearch_cache.load()
        for dbData in self.main_db.all('tv_shows'):
            try:
                self.log.debug("Loading data for show: [{}]".format(dbData['show_name']))
                self.showlist.append(
                    TVShow(int(dbData['indexer']), int(dbData['indexer_id'])))
                self.quicksearch_cache.add_show(dbData['indexer_id'])
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" % (dbData['location'], str(e)))
class VimMessenger(threading.Thread):
    """Background thread accepting TCP connections from Vim.

    Messages are '@@@'-terminated JSON blobs; each message's 'type' field
    selects an action (start/kill a kernel, or forward an execute/complete
    request onto a kernel thread's websocket via its ioloop).
    """

    class Server(TCPServer):
        # Tornado TCP server running on the VimMessenger thread's ioloop.
        # `jkg_handler` is assigned externally after construction (see run()).
        async def handle_stream(self, stream, address):
            while 1:
                try:
                    # Save stream (last stream wins; one Vim client assumed)
                    self.stream = stream
                    # Receive one '@@@'-delimited JSON message
                    data = await stream.read_until(b'@@@')
                    data = data[:-3].decode('utf-8')
                    data = json_decode(data)
                    if data['type'] == 'start':
                        # Start kernel: spawn a KernelHandler thread for the
                        # given Vim buffer; kernel_id/lang are optional
                        logger.debug(data)
                        kernel_thread = KernelHandler(
                            base_url=self.jkg_handler.base_url,
                            base_ws_url=self.jkg_handler.base_ws_url,
                            bufnr=data['bufnr'],
                            vim_messenger=self.jkg_handler.vim_messenger,
                            kernel_id=None if 'kernel_id' not in data.keys() else data['kernel_id'],
                            lang=None if 'lang' not in data.keys() else data['lang'],
                        )
                        self.jkg_handler.kernel_threads.append(kernel_thread)
                        kernel_thread.start()
                    elif data['type'] == 'kill':
                        # Kill kernel: close its websocket on its own ioloop
                        thread = self.jkg_handler.find_kernel_thread_by_id(
                            data['kernel_id'])
                        thread.ioloop.add_callback(lambda: thread.ws.close())
                    elif data['type'] == 'execute':
                        # Execute on kernel: forward a Jupyter
                        # 'execute_request' over the kernel websocket
                        kernel_thread = self.jkg_handler.find_kernel_thread_by_id(
                            data['kernel_id'])
                        kernel_thread.ioloop.add_callback(
                            lambda: kernel_thread.ws.write_message(
                                json_encode({
                                    'header': {
                                        'username': '',
                                        'version': '5.0',
                                        'session': '',
                                        'msg_id': data['msg_id'],
                                        'msg_type': 'execute_request'
                                    },
                                    'parent_header': {},
                                    'channel': 'shell',
                                    'content': {
                                        'code': data['code'],
                                        'silent': False,
                                        'store_history': True,
                                        'user_expressions': {},
                                        'allow_stdin': False
                                    },
                                    'metadata': {},
                                    'buffers': {}
                                })))
                    elif data['type'] == 'complete':
                        # Completion request: forward a Jupyter
                        # 'complete_request' over the kernel websocket
                        kernel_thread = self.jkg_handler.find_kernel_thread_by_id(
                            data['kernel_id'])
                        kernel_thread.ioloop.add_callback(
                            lambda: kernel_thread.ws.write_message(
                                json_encode({
                                    'header': {
                                        'username': '',
                                        'version': '5.0',
                                        'session': '',
                                        'msg_id': data['msg_id'],
                                        'msg_type': 'complete_request'
                                    },
                                    'parent_header': {},
                                    'channel': 'shell',
                                    'content': {
                                        'code': data['code'],
                                        'cursor_pos': data['cursor_pos']
                                    },
                                    'metadata': {},
                                    'buffers': {}
                                })))
                except StreamClosedError:
                    # Client disconnected; stop serving this stream
                    break

    def __init__(self, args):
        threading.Thread.__init__(self, name='VimMessenger')
        self._args = args
        # Assigned externally before start(); gives access to kernel threads
        self.jkg_handler = None

    def run(self):
        # Own ioloop for this thread; Server callbacks run here
        self.ioloop = IOLoop()
        self.ioloop.make_current()
        self.tcp_server = VimMessenger.Server()
        self.tcp_server.jkg_handler = self.jkg_handler
        # while not hasattr(self.jkg_handler, 'ws'):
        #     sleep(0.1)
        self.tcp_server.listen(self._args.vim_port)
        logger.debug('Start VimMessenger')
        self.ioloop.start()
        logger.debug('Stopped VimMessenger')
def run(self):
    """Serve the websocket application on ``self.port``.

    Creates a private Tornado IOLoop and blocks in it until the loop is
    stopped externally.
    """
    loop = IOLoop()
    # Make the fresh loop current for this thread so that
    # Application.listen() registers its sockets on *this* loop rather
    # than whatever loop happens to be current.  This matches the
    # IOLoop()/make_current() pattern used by the other thread-based
    # servers in this file.
    loop.make_current()
    app = Application([(r'/', WsSocketHandler)])
    app.listen(self.port)
    loop.start()
class SnapServer(Application):
    """HTTP bridge letting Snap! clients drive an RGB LED matrix.

    One client ("nickname") at a time holds write access; an admin page
    selects which.  Flask handles routing; the WSGI app is served through
    Tornado (see run()).
    """

    # Sentinel nickname meaning "no client is authorized".
    OFF = "turnoff"

    def __init__(self, port, argparser=None):
        Application.__init__(self, argparser)
        self.port = int(port)
        self.flask = Flask(__name__)
        logging.basicConfig(level=logging.DEBUG)
        self.current_auth_nick = self.OFF
        # nickname -> {"appeared": first-seen ts, "last_seen": ts}
        self.nicknames = {}
        # guards current_auth_nick and nicknames (accessed per-request)
        self.lock = RLock()
        CORS(self.flask)
        self.loop = None
        self.route()

    def signal_handler(self, signal, frame):
        print("Received SIGINT, closing...")
        if self.loop is not None:
            self.loop.stop()

    def route(self):
        # Bind Flask routes to bound methods (the decorator form is not
        # usable on methods at class-definition time).
        self.flask.route('/admin', methods=['GET', 'POST'])(self.render_admin_page)
        self.flask.route('/admin/nicknames', methods=['GET'])(self.get_admin_nicknames)
        self.flask.route('/admin/active_nickname', methods=['GET'])(self.get_admin_active_nickname)
        self.flask.route('/set_rgb_matrix', methods=['POST'])(self.set_rgb_matrix)
        self.flask.route('/is_authorized/<nickname>', methods=['GET'])(self.is_authorized)
        self.flask.route('/authorize', methods=['POST'])(self.authorize)
        self.flask.route('/get_nickname', methods=['GET'])(self.get_nickname)

    @requires_auth
    def get_admin_active_nickname(self):
        return dumps(self.current_auth_nick)

    @requires_auth
    def get_admin_nicknames(self):
        # Sorted by first-appearance time.
        return dumps(sorted(self.nicknames.keys(),
                            key=lambda x: self.nicknames[x]["appeared"]))

    def check_nicknames_validity(self):
        # Drop nicknames not seen for 20s; if the active one expires,
        # revert to OFF.
        with self.lock:
            temp_dict = {}
            for nick, timestamps in self.nicknames.items():
                if time() - timestamps["last_seen"] < 20:
                    temp_dict[nick] = timestamps
                else:
                    if nick == self.current_auth_nick:
                        self.current_auth_nick = self.OFF
            self.nicknames = temp_dict

    # Uncomment to require authentication
    #@requires_auth
    def render_admin_page(self):
        res = render_template('admin.html')
        return res

    def authorize(self):
        # NOTE(review): under Python 3 request.get_data() returns bytes, so
        # the membership test against str nicknames would never match; this
        # code appears written for Python 2 -- confirm before porting.
        nick = request.get_data()
        with self.lock:
            if nick in list(self.nicknames.keys()) + [self.OFF]:
                self.current_auth_nick = nick
                self.erase_all()
        return ''

    @staticmethod
    def scale(v):
        # Map a 0-255 channel value onto [0.0, 1.0], clamped.
        return min(1, max(0., float(v)/255.))

    def set_rgb_matrix(self):
        """Fill the matrix row-major from a body of "<nick>:r:g:b:r:g:b:..."."""
        try:
            data = request.get_data().split(':')
            with self.lock:
                if data.pop(0) == self.current_auth_nick:
                    nb_rows = 15  # NOTE(review): unused; bounds not enforced
                    nb_cols = 20
                    r = 0
                    c = 0
                    while data:
                        red = data.pop(0)
                        green = data.pop(0)
                        blue = data.pop(0)
                        # NOTE(review): map() is lazy on Python 3 -- confirm
                        # set_pixel accepts an iterator (or that this runs
                        # on Python 2, where it gets a list).
                        self.model.set_pixel(r, c, map(self.scale, [red, green, blue]))
                        if c < nb_cols - 1:
                            c += 1
                        else:
                            c = 0
                            r += 1
        except Exception as e:
            print(repr(e))
            # NOTE(review): sys.exc_clear() exists only on Python 2.
            sys.exc_clear()
        return ''

    def erase_all(self):
        self.model.set_all('black')
        return ''

    def is_authorized(self, nickname):
        with self.lock:
            if nickname in self.nicknames:
                self.nicknames[nickname]["last_seen"] = time()  # update user table
            self.check_nicknames_validity()
        return str(nickname == self.current_auth_nick)

    def get_nickname(self):
        # Generate a collision-free, human-friendly id and register it.
        rand_id = petname.generate()
        with self.lock:
            while rand_id in self.nicknames.keys():
                rand_id = petname.generate()
            self.nicknames[rand_id] = {"appeared": time(), "last_seen": time()}
        return rand_id

    def run(self):
        # open('http://snap.berkeley.edu/run')
        signal.signal(signal.SIGINT, self.signal_handler)
        # Serve the Flask WSGI app through Tornado; blocks until stopped.
        self.loop = IOLoop()
        http_server = HTTPServer(WSGIContainer(self.flask))
        http_server.listen(self.port)
        self.loop.start()
class MockHub(JupyterHub):
    """Hub with various mock bits"""

    # NamedTemporaryFile backing the hub database; created in start()
    db_file = None
    confirm_no_ssl = True
    last_activity_interval = 2

    @default('subdomain_host')
    def _subdomain_host_default(self):
        return os.environ.get('JUPYTERHUB_TEST_SUBDOMAIN_HOST', '')

    @default('ip')
    def _ip_default(self):
        return '127.0.0.1'

    @default('authenticator_class')
    def _authenticator_class_default(self):
        return MockPAMAuthenticator

    @default('spawner_class')
    def _spawner_class_default(self):
        return MockSpawner

    def init_signal(self):
        # No signal handlers in tests.
        pass

    def start(self, argv=None):
        """Run the hub on a background thread; block until it answers HTTP.

        :param argv: command-line arguments forwarded to initialize()
        """
        self.db_file = NamedTemporaryFile()
        self.pid_file = NamedTemporaryFile(delete=False).name
        self.db_url = self.db_file.name

        # Signalled (via the hub's ioloop) once the hub is up.
        evt = threading.Event()

        @gen.coroutine
        def _start_co():
            assert self.io_loop._running
            # put initialize in start for SQLAlchemy threading reasons
            yield super(MockHub, self).initialize(argv=argv)
            # add an initial user
            user = orm.User(name='user')
            self.db.add(user)
            self.db.commit()
            yield super(MockHub, self).start()
            yield self.hub.server.wait_up(http=True)
            self.io_loop.add_callback(evt.set)

        def _start():
            # Fresh ioloop owned by the background thread.
            self.io_loop = IOLoop()
            self.io_loop.make_current()
            self.io_loop.add_callback(_start_co)
            self.io_loop.start()

        self._thread = threading.Thread(target=_start)
        self._thread.start()
        ready = evt.wait(timeout=10)
        assert ready

    def stop(self):
        """Stop the hub thread and run cleanup synchronously."""
        super().stop()
        self._thread.join()
        IOLoop().run_sync(self.cleanup)
        # ignore the call that will fire in atexit
        self.cleanup = lambda: None
        self.db_file.close()

    def login_user(self, name):
        """POST the login form for *name* and return the session cookies."""
        base_url = public_url(self)
        r = requests.post(
            base_url + 'hub/login',
            data={
                'username': name,
                'password': name,
            },
            allow_redirects=False,
        )
        r.raise_for_status()
        assert r.cookies
        return r.cookies
class Worker(Thread):
    """Daemon thread that consumes RabbitMQ messages on a private ioloop.

    Declares a topic exchange and an exclusive queue, binds the given
    routing keys, then dispatches each message to handle_request().
    """

    rabbit_host = 'localhost'

    def __init__(self, exchange, bindings=[], event_io_loop=None, *args, **kwargs):
        # NOTE(review): mutable default for `bindings` is shared across
        # instances; harmless while only read, but fragile.
        super(Worker, self).__init__(*args, **kwargs)
        self.daemon = True
        self.exchange = exchange
        self.bindings = bindings
        self.event_io_loop = event_io_loop
        # Set once consumption has started (see _start_client).
        self.listening = AsyncEvent(io_loop=self.event_io_loop)

    def run(self, *args, **kwargs):
        # Thread entry point: private ioloop + async AMQP client.
        self.io_loop = IOLoop()
        self.client = AsyncRabbitConnectionBase(host=self.rabbit_host,
                                                io_loop=self.io_loop)
        self.io_loop.add_callback(stack_context.wrap(self._start_client))
        self.io_loop.start()

    @engine
    def _start_client(self):
        # Sequentially: declare exchange, declare queue, bind keys, consume.
        logger.info('Declaring worker exchange')
        yield Task(self.client.exchange_declare, exchange=self.exchange,
                   exchange_type='topic', auto_delete=True)
        logger.info('Declaring worker queue')
        self.queue = yield Task(self.client.queue_declare, exclusive=True,
                                auto_delete=True)
        logger.info('Binding worker keys')
        for routing_key in self.bindings:
            yield Task(self.client.queue_bind, queue=self.queue,
                       exchange=self.exchange, routing_key=routing_key)
        logger.info('Starting worker message consumption')
        yield Task(self.client.basic_consume,
                   consumer_callback=self._handle_message, queue=self.queue)
        self.listening.set()

    def _handle_message(self, channel, method, properties, body):
        # Decode the JSON payload before handing off.
        self.handle_request(channel, method, properties, json.loads(body))

    def handle_request(self, channel, method, properties, body):
        # This is an example implementation. Subclasses should override
        # handle_request
        # print "got request with cid: %s" % properties.correlation_id
        props = BasicProperties(correlation_id=properties.correlation_id)
        reply_body = json.dumps({'my': 'BODY'})
        # Note that we don't provide an exchange here because the routing key
        # is setup as a "direct" key for RPC.
        channel.basic_publish(exchange='', routing_key=properties.reply_to,
                              properties=props, body=reply_body)
        # print "sent reply to: %s" % properties.reply_to
        # Acknowledge the message was received and processed
        channel.basic_ack(method.delivery_tag)
logger.info('exiting...')
        self.is_closing = True

    def try_exit(self):
        # Polled by the PeriodicCallback installed in __main__: once a
        # shutdown signal has set is_closing, stop the singleton ioloop.
        if self.is_closing:
            IOLoop.instance().stop()
            logger.info('exit success')


if __name__ == '__main__':
    tornado_options.parse_command_line()
    routes = [
        URLSpec(r'/healthz', HealthzHandler),
        URLSpec(r"/metrics", MetricsHandler),
        URLSpec(r"/", FunctionHandler)
    ]
    logger.info("Server is starting")
    # Run Tornado on top of a uvloop event loop.
    loop = uvloop.new_event_loop()
    asyncio.set_event_loop(loop)
    # NOTE(review): IOLoop().current() instantiates a throwaway IOLoop
    # before calling the class-level current(); IOLoop.current() is
    # presumably what was intended -- confirm.
    io_loop = IOLoop().current()
    app = KubelessApplication(routes)
    server = HTTPServer(app)
    # reuse_port lets multiple processes bind the same function port
    server.bind(func_port, reuse_port=True)
    server.start()
    signal.signal(signal.SIGINT, app.signal_handler)
    signal.signal(signal.SIGTERM, app.signal_handler)
    # Check every 100ms whether a shutdown signal has arrived.
    PeriodicCallback(app.try_exit, 100).start()
    io_loop.start()
class MockHub(JupyterHub):
    """Hub with various mock bits"""

    # NamedTemporaryFile backing the sqlite db; created in start()
    db_file = None

    def _ip_default(self):
        return 'localhost'

    def _authenticator_class_default(self):
        return MockPAMAuthenticator

    def _spawner_class_default(self):
        return MockSpawner

    def init_signal(self):
        # No signal handlers in tests.
        pass

    def start(self, argv=None):
        """Run the hub on a background thread; block until it answers HTTP.

        :param argv: command-line arguments forwarded to initialize()
        """
        self.db_file = NamedTemporaryFile()
        self.db_url = 'sqlite:///' + self.db_file.name

        # Signalled (via the hub's ioloop) once the hub is up.
        evt = threading.Event()

        @gen.coroutine
        def _start_co():
            assert self.io_loop._running
            # put initialize in start for SQLAlchemy threading reasons
            yield super(MockHub, self).initialize(argv=argv)
            # add an initial user
            user = orm.User(name='user')
            self.db.add(user)
            self.db.commit()
            yield super(MockHub, self).start()
            yield self.hub.server.wait_up(http=True)
            self.io_loop.add_callback(evt.set)

        def _start():
            # Fresh ioloop owned by the background thread.
            self.io_loop = IOLoop()
            self.io_loop.make_current()
            self.io_loop.add_callback(_start_co)
            self.io_loop.start()

        self._thread = threading.Thread(target=_start)
        self._thread.start()
        ready = evt.wait(timeout=10)
        assert ready

    def stop(self):
        """Stop the hub thread and run cleanup synchronously."""
        super().stop()
        self._thread.join()
        IOLoop().run_sync(self.cleanup)
        # ignore the call that will fire in atexit
        self.cleanup = lambda : None
        self.db_file.close()

    def login_user(self, name):
        """POST the login form for *name* and return the session cookies."""
        r = requests.post(self.proxy.public_server.url + 'hub/login',
                          data={
                              'username': name,
                              'password': name,
                          },
                          allow_redirects=False,
                          )
        assert r.cookies
        return r.cookies
class VCRProxyService(object):
    """TChannel service that replays or records requests via a cassette.

    Runs its own TChannel server on a background thread with a private
    ioloop; usable as a context manager (start on enter, stop on exit).
    """

    def __init__(self, cassette, unpatch):
        """
        :param unpatch:
            A function returning a context manager which temporarily
            unpatches any monkey patched code so that a real request can
            be made.
        :param cassette:
            Cassette being played.
        """
        self.unpatch = unpatch
        self.cassette = cassette
        self.io_loop = None
        self.thread = None
        self.tchannel = None
        # Set once the server thread is listening (see _run/start).
        self._running = threading.Event()

    @wrap_uncaught(reraise=(
        VCRProxy.CannotRecordInteractionsError,
        VCRProxy.RemoteServiceError,
        VCRProxy.VCRServiceError,
    ))
    @gen.coroutine
    def send(self, request, response):
        """Replay a matching recorded response, or (if the cassette allows
        recording) make the real request, record it, and return it."""
        cassette = self.cassette
        request = request.args.request

        # TODO decode requests and responses based on arg scheme into more
        # readable formats.

        # Because Thrift doesn't handle UTF-8 correctly right now
        request.serviceName = request.serviceName.decode('utf-8')
        request.endpoint = request.endpoint.decode('utf-8')

        # TODO do we care about hostport being the same?
        if cassette.can_replay(request):
            vcr_response = cassette.replay(request)
            raise gen.Return(vcr_response)

        if cassette.write_protected:
            raise VCRProxy.CannotRecordInteractionsError(
                'Could not find a matching response for request %s and the '
                'record mode %s prevents new interactions from being '
                'recorded. Your test may be performing an uenxpected '
                'request.' % (str(request), cassette.record_mode))

        arg_scheme = VCRProxy.ArgScheme.to_name(request.argScheme).lower()

        with self.unpatch():
            # TODO propagate other request and response parameters
            # TODO might make sense to tag all VCR requests with a protocol
            # header of some kind
            response_future = self.tchannel.request(
                service=request.serviceName,
                arg_scheme=arg_scheme,
                hostport=request.hostPort,
            ).send(
                request.endpoint,
                request.headers,
                request.body,
                headers={h.key: h.value for h in request.transportHeaders},
            )

        # Don't actually yield while everything is unpatched.
        try:
            response = yield response_future
        except TChannelError as e:
            raise VCRProxy.RemoteServiceError(
                code=e.code,
                message=e.message,
            )
        response_headers = yield response.get_header()
        response_body = yield response.get_body()

        vcr_response = VCRProxy.Response(
            response.status_code,
            response_headers,
            response_body,
        )
        cassette.record(request, vcr_response)
        raise gen.Return(vcr_response)

    @property
    def hostport(self):
        return self.tchannel.hostport

    def _run(self):
        # Thread target: private ioloop + listening TChannel server.
        self.io_loop = IOLoop()
        self.io_loop.make_current()

        self.tchannel = TChannel('proxy-server')
        self.tchannel.register(VCRProxy, handler=self.send)
        self.tchannel.listen()
        self._running.set()

        self.io_loop.start()

    def start(self):
        """Start the proxy server thread; wait up to 1s for it to listen."""
        self.thread = threading.Thread(target=self._run)
        self.thread.start()
        self._running.wait(1)

    def stop(self):
        """Stop the server, its ioloop, and join the thread."""
        self.tchannel.close()
        self.tchannel = None

        self.io_loop.stop()
        self.io_loop = None

        self.thread.join(1)  # seconds
        self.thread = None

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()
class Client(object):
    """Streams order-book data from Bitfinex (mode 1) or GDAX (mode 2)
    and forwards it to a local websocket server."""

    def __init__(self, url, timeout, mode):
        self.url = url
        self.URL = "ws://localhost:8888/noble-markets-realtime-order-book"  # local websocket server
        self.timeout = timeout
        self.ioloop = IOLoop()
        self.ws = None  # exchange websocket (None while disconnected)
        self.mode = mode  # 1 for bitfinex, 2 for gdax
        # channel id -> currency pair (bitfinex subscription mapping)
        self.channelIdVal = {}
        self.connectLocal()  # create connection to local WebSocket Server
        self.connect()
        # Reconnect check every 20 seconds.
        PeriodicCallback(self.keep_alive, 20000, io_loop=self.ioloop).start()
        self.ioloop.start()

    @gen.coroutine
    def connectLocal(self):
        # Reuse the same websocket connection so as to prevent creation of
        # multiple redundant connections.
        # NOTE(review): the log line prints self.url while the connection is
        # made to self.URL -- presumably a copy/paste slip; confirm.
        print ("Establishing connection to Local Websocket server "+self.url)
        try:
            self.WS = yield websocket_connect(self.URL)
        except:
            print ("connection error "+self.URL)
        else:
            pass

    @gen.coroutine
    def connect(self):
        # Connect to the exchange, start the read loop, then subscribe.
        print ("Establishing connection to "+self.url)
        try:
            self.ws = yield websocket_connect(self.url)
        except:
            print ("connection error "+self.url)
        else:
            print ("connected to "+self.url)
            self.run()
            # code for subscribing goes here
            if self.mode==1:
                self.bitfinexSubscribe()  # comment this line for debugging
                pass
            elif self.mode==2:
                self.gdaxSubscribe()
                pass
            # elif self.mode==0:
            #     print ("you have entered the dungeon, exit now!!!!")
            #     self.ioloop.stop()
            else:
                pass

    @gen.coroutine
    def run(self):
        while True:  # look for new messages
            msg = yield self.ws.read_message()
            if msg is None:
                print ("connection closed for "+self.url)
                self.ws = None
                break
            else:
                message = json.loads(msg)  # converting the response into json
                if self.mode == 1:  # for bitfinex
                    # msgs for bitfinex
                    if 'event' in message:
                        if message['event'] == "subscribed":
                            # storing the mapping for pair with its channel id
                            self.channelIdVal[message['chanId']] = message['pair']
                    else:
                        # print(message)
                        MSG = yield self.WS.read_message()
                        if MSG is None:  # can be replaced by while loop
                            print ("connection closed for "+self.URL+ " trying again")
                            self.connectLocal()
                        else:
                            BitfinexData(message, self.channelIdVal, self.WS)
                elif self.mode == 2:  # for GDAX
                    # if 'product_id' in message:
                    #     MSG = yield self.WS.read_message()
                    #     print(message)
                    #     if MSG is None:  # can be replaced by while loop
                    #         print ("connection closed for "+self.URL+ " trying again")
                    #         print("I m inside")
                    #         self.connectLocal()
                    #     else:
                    #         pass
                    GdaxData(message, self.WS)
                    # print(message)

    def bitfinexSubscribe(self):
        # fetching all the active pairs
        requestArticles = requests.get("https://api.bitfinex.com/v1/symbols")
        pairs = requestArticles.json()
        # print(pairs)
        for pair in pairs:  # subscribing to all active pairs
            print("Subscribing to pair: "+pair)
            request = {}
            request['event'] = 'subscribe'
            request['channel'] = "book"
            request['pair'] = pair
            request['prec'] = "P1"
            request['freq'] = "F1"
            json_request = json.dumps(request)
            # print(json_request)
            self.ws.write_message(json_request)
            # break

    def gdaxSubscribe(self):
        # fetching all the active product ids from gdax
        requestArticles = requests.get("https://api.gdax.com/products")
        pairs = requestArticles.json()
        prodIds = []
        for pair in pairs:  # constructing list of the productids to be subscribed
            # print(pair['id'])
            prodIds.append(pair['id'])
            print("Subscribing to productids: "+pair['id'])
            # break
        request = {}
        request['type'] = 'subscribe'
        request['product_ids'] = prodIds
        request['channels'] = ["level2"]
        json_request = json.dumps(request)
        # print(json_request)
        self.ws.write_message(json_request)

    def keep_alive(self):  # if connection goes down
        if self.ws is None:
            self.connect()
        else:
            pass
class CompatibilityTests(unittest.TestCase):
    """Interoperability tests: Tornado and Twisted servers/clients driven
    by either the Tornado IOLoop or a TornadoReactor."""

    def setUp(self):
        self.saved_signals = save_signal_handlers()
        self.io_loop = IOLoop()
        self.io_loop.make_current()
        # Twisted reactor running on top of the Tornado ioloop.
        self.reactor = TornadoReactor()

    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
        restore_signal_handlers(self.saved_signals)

    def start_twisted_server(self):
        # Minimal Twisted site on an OS-assigned port.
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return "Hello from twisted!"
        site = Site(HelloResource())
        port = self.reactor.listenTCP(0, site, interface='127.0.0.1')
        self.twisted_port = port.getHost().port

    def start_tornado_server(self):
        # Minimal Tornado app on an unused port.
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")
        app = Application([('/', HelloHandler)],
                          log_function=lambda x: None)
        server = HTTPServer(app)
        sock, self.tornado_port = bind_unused_port()
        server.add_sockets([sock])

    def run_ioloop(self):
        # Drive everything from the Tornado side.
        self.stop_loop = self.io_loop.stop
        self.io_loop.start()
        self.reactor.fireSystemEvent('shutdown')

    def run_reactor(self):
        # Drive everything from the Twisted side.
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        """Fetch *url* with Tornado's AsyncHTTPClient using *runner* as the
        event-loop driver; returns the single response."""
        responses = []
        client = AsyncHTTPClient()

        def callback(response):
            responses.append(response)
            self.stop_loop()
        client.fetch(url, callback=callback)
        runner()
        self.assertEqual(len(responses), 1)
        responses[0].rethrow()
        return responses[0]

    def twisted_fetch(self, url, runner):
        """Fetch *url* with a Twisted Agent, streaming the body via a
        Protocol; returns the joined body."""
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request(b'GET', utf8(url))

        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished
        d.addCallback(callback)

        def shutdown(failure):
            if hasattr(self, 'stop_loop'):
                self.stop_loop()
            elif failure is not None:
                # loop hasn't been initialized yet; try our best to
                # get an error message out. (the runner() interaction
                # should probably be refactored).
                try:
                    failure.raiseException()
                except:
                    logging.error('exception before starting loop',
                                  exc_info=True)
        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return ''.join(chunks)

    def twisted_coroutine_fetch(self, url, runner):
        """Like twisted_fetch, but reads the body with readBody inside a
        Tornado coroutine."""
        body = [None]

        @gen.coroutine
        def f():
            # This is simpler than the non-coroutine version, but it cheats
            # by reading the body in one blob instead of streaming it with
            # a Protocol.
            client = Agent(self.reactor)
            response = yield client.request(b'GET', utf8(url))
            with warnings.catch_warnings():
                # readBody has a buggy DeprecationWarning in Twisted 15.0:
                # https://twistedmatrix.com/trac/changeset/43379
                warnings.simplefilter('ignore', category=DeprecationWarning)
                body[0] = yield readBody(response)
            self.stop_loop()
        self.io_loop.add_callback(f)
        runner()
        return body[0]

    def testTwistedServerTornadoClientIOLoop(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_ioloop)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTornadoServerTwistedClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, 'Hello from tornado!')

    def testTornadoServerTwistedCoroutineClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_coroutine_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')
class SyncHTTPClientTest(unittest.TestCase):
    """Tests the synchronous HTTPClient against a server running on its own
    ioloop in a background thread."""

    def setUp(self):
        self.server_ioloop = IOLoop()
        # Signalled once the server socket is bound and added.
        event = threading.Event()

        @gen.coroutine
        def init_server():
            sock, self.port = bind_unused_port()
            app = Application([("/", HelloWorldHandler)])
            self.server = HTTPServer(app)
            self.server.add_socket(sock)
            event.set()

        def start():
            self.server_ioloop.run_sync(init_server)
            self.server_ioloop.start()

        self.server_thread = threading.Thread(target=start)
        self.server_thread.start()
        event.wait()

        self.http_client = HTTPClient()

    def tearDown(self):
        def stop_server():
            self.server.stop()
            # Delay the shutdown of the IOLoop by several iterations because
            # the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
            # with http/2, which leaves a Future with an unexamined
            # StreamClosedError on the loop).

            @gen.coroutine
            def slow_stop():
                yield self.server.close_all_connections()
                # The number of iterations is difficult to predict. Typically,
                # one is sufficient, although sometimes it needs more.
                for i in range(5):
                    yield
                self.server_ioloop.stop()

            self.server_ioloop.add_callback(slow_stop)

        self.server_ioloop.add_callback(stop_server)
        self.server_thread.join()
        self.http_client.close()
        self.server_ioloop.close(all_fds=True)

    def get_url(self, path):
        # URL of the background server for the given path.
        return "http://127.0.0.1:%d%s" % (self.port, path)

    def test_sync_client(self):
        response = self.http_client.fetch(self.get_url("/"))
        self.assertEqual(b"Hello world!", response.body)

    def test_sync_client_error(self):
        # Synchronous HTTPClient raises errors directly; no need for
        # response.rethrow()
        with self.assertRaises(HTTPError) as assertion:
            self.http_client.fetch(self.get_url("/notfound"))
        self.assertEqual(assertion.exception.code, 404)
class TornadoServer(AbstractServer):
    """ Flexx Server implemented in Tornado.
    """

    def __init__(self, host, port, new_loop):
        # When new_loop is True a private IOLoop is created; otherwise the
        # thread's current loop is (re)used.
        self._new_loop = new_loop
        self._app = None
        self._server = None
        self._get_io_loop()
        super().__init__(host, port)

    def _get_io_loop(self):
        # Get a new ioloop or the current ioloop for this thread
        if self._new_loop:
            self._loop = IOLoop()
        else:
            self._loop = IOLoop.current(instance=is_main_thread())
            if self._loop is None:
                self._loop = IOLoop(make_current=True)

    def _open(self, host, port):
        # Note: does not get called if host is False. That way we can
        # run Flexx in e.g. JLab's application.

        # Create tornado application
        self._app = tornado.web.Application([
            (r"/flexx/ws/(.*)", WSHandler),
            (r"/flexx/(.*)", MainHandler),
            (r"/(.*)", AppHandler),
        ])
        # Create tornado server, bound to our own ioloop
        self._server = tornado.httpserver.HTTPServer(self._app,
                                                     io_loop=self._loop)

        # Start server (find free port number if port not given)
        if port:
            # Turn port into int, use hashed port number if a string was given
            try:
                port = int(port)
            except ValueError:
                port = port_hash(port)
            self._server.listen(port, host)
        else:
            # Try N ports in a repeatable range (easier, browser history, etc.)
            prefered_port = port_hash('Flexx')
            for i in range(8):
                port = prefered_port + i
                try:
                    self._server.listen(port, host)
                    break
                except OSError:
                    pass  # address already in use
            else:
                # Ok, let Tornado figure out a port
                [sock] = netutil.bind_sockets(None, host, family=socket.AF_INET)
                self._server.add_sockets([sock])
                port = sock.getsockname()[1]

        # Notify address, so its easy to e.g. copy and paste in the browser
        self._serving = self._app._flexx_serving = host, port
        logger.info('Serving apps at http://%s:%i/' % (host, port))

    def _start(self):
        # Ensure that our loop is the current loop for this thread
        if self._new_loop:
            self._loop.make_current()
        elif IOLoop.current(instance=is_main_thread()) is not self._loop:
            raise RuntimeError(
                'Server must use ioloop that is current to this thread.')
        # Make use of the semi-standard defined by IPython to determine
        # if the ioloop is "hijacked" (e.g. in Pyzo). There is no public
        # way to determine if a loop is already running, but the
        # AbstractServer class keeps track of this.
        if not getattr(self._loop, '_in_event_loop', False):
            self._loop.start()

    def _stop(self):
        # todo: explicitly close all websocket connections
        logger.debug('Stopping Tornado server')
        self._loop.stop()

    def _close(self):
        self._server.stop()

    def call_later(self, delay, callback, *args, **kwargs):
        # We use a wrapper func so that exceptions are processed via our
        # logging system. Also fixes that Tornado seems to close websockets
        # when an exception occurs (issue #164) though one could also
        # use ``with tornado.stack_context.NullContext()`` to make callbacks
        # be called more "independently".
        def wrapper():
            try:
                callback(*args, **kwargs)
            except Exception as err:
                err.skip_tb = 1
                logger.exception(err)
        if delay <= 0:
            self._loop.add_callback(wrapper)
        else:
            self._loop.call_later(delay, wrapper)

    @property
    def app(self):
        """ The Tornado Application object being used."""
        return self._app

    @property
    def loop(self):
        """ The Tornado IOLoop object being used."""
        return self._loop

    @property
    def server(self):
        """ The Tornado HttpServer object being used."""
        return self._server
def serve_http():
    """Serve the Flask WSGI ``app`` on ``PORT`` via Tornado's HTTPServer.

    Stores the event loop in the module-level ``IOLOOP`` global so other
    code can stop it; blocks until the loop is stopped.
    """
    global IOLOOP
    # IOLoop.current() is a classmethod; the original IOLoop().current()
    # constructed a brand-new (and immediately discarded) IOLoop instance
    # just to call the class-level current().
    IOLOOP = IOLoop.current()
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(PORT)
    IOLOOP.start()
class TornadoServer(AbstractServer):
    """ Flexx Server implemented in Tornado.
    """

    def __init__(self, host, port, new_loop):
        # When new_loop is True a private IOLoop is created in _open();
        # otherwise the thread's current loop is (re)used.
        self._new_loop = new_loop
        super(TornadoServer, self).__init__(host, port)

    def _open(self, host, port):
        # Get a new ioloop or the current ioloop for this thread
        if self._new_loop:
            self._loop = IOLoop()
        else:
            self._loop = IOLoop.current(instance=is_main_thread())
            if self._loop is None:
                self._loop = IOLoop(make_current=True)

        # Create tornado application
        self._app = tornado.web.Application([
            (r"/(.*)/ws", WSHandler),
            (r"/(.*)", MainHandler),
        ])
        # Create tornado server, bound to our own ioloop
        self._server = tornado.httpserver.HTTPServer(self._app,
                                                     io_loop=self._loop)

        # Start server (find free port number if port not given)
        if port:
            # Turn port into int, use hashed port number if a string was given
            try:
                port = int(port)
            except ValueError:
                port = port_hash(port)
            self._server.listen(port, host)
        else:
            # Try N ports in a repeatable range (easier, browser history, etc.)
            prefered_port = port_hash('Flexx')
            for i in xrange(8):
                port = prefered_port + i
                try:
                    self._server.listen(port, host)
                    break
                except OSError:
                    pass  # address already in use
            else:
                # Ok, let Tornado figure out a port
                [sock] = netutil.bind_sockets(None, host, family=socket.AF_INET)
                self._server.add_sockets([sock])
                port = sock.getsockname()[1]

        # Notify address, so its easy to e.g. copy and paste in the browser
        self._serving = self._app._flexx_serving = host, port
        logger.info('Serving apps at http://%s:%i/' % (host, port))

    def _start(self):
        # Ensure that our loop is the current loop for this thread
        if self._new_loop:
            self._loop.make_current()
        elif IOLoop.current(instance=is_main_thread()) is not self._loop:
            raise RuntimeError(
                'Server must use ioloop that is current to this thread.')
        # Make use of the semi-standard defined by IPython to determine
        # if the ioloop is "hijacked" (e.g. in Pyzo). There is no public
        # way to determine if a loop is already running, but the
        # AbstractServer class keeps track of this.
        if not getattr(self._loop, '_in_event_loop', False):
            self._loop.start()

    def _stop(self):
        # todo: explicitly close all websocket connections
        logger.debug('Stopping Tornado server')
        self._loop.stop()

    def _close(self):
        self._server.stop()

    def call_later(self, delay, callback, *args, **kwargs):
        # Schedule a callback on our loop, immediately or after `delay`.
        if delay <= 0:
            self._loop.add_callback(callback, *args, **kwargs)
        else:
            self._loop.call_later(delay, callback, *args, **kwargs)

    @property
    def app(self):
        """ The Tornado Application object being used."""
        return self._app

    @property
    def loop(self):
        """ The Tornado IOLoop object being used."""
        return self._loop

    @property
    def server(self):
        """ The Tornado HttpServer object being used."""
        return self._server
# Application bootstrap: build the item cache, wire up handlers, start the
# HTTP server and the periodic cache-refresh job, then run the ioloop.
item_cache = ItemCache()

application = tornado.web.Application([
    (r"/", MainHandler, dict(item_cache=item_cache)),
    (r"/about", AboutHandler),
    (r"/search", SearchHandler),
], **settings)

# Cloud Foundry supplies the bind address/port via VCAP_* env vars;
# fall back to 0.0.0.0:80 for local runs.
HOST = os.getenv('VCAP_APP_HOST', '0.0.0.0')
PORT = int(os.getenv('VCAP_APP_PORT', '80'))

http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(PORT, HOST)

# IOLoop.instance() is a classmethod; the original IOLoop().instance()
# constructed an extra, unused IOLoop before fetching the singleton.
ioloop = IOLoop.instance()

# Refresh the local file database every 20 minutes (first run after 1s).
sched = InitialPeriodicCallback(item_cache.update_local_file_database,
                                20 * MINUTE,
                                1 * SECOND,
                                io_loop=ioloop)
sched.start()

if 'production' not in sys.argv:
    # Development mode: watch source files and auto-reload on change.
    for root, dirs, files in os.walk('.', topdown=False):
        for name in files:
            if '#' not in name and 'DS_S' not in name and 'flymake' not in name and 'pyc' not in name:
                tornado.autoreload.watch(root + '/' + name)
    tornado.autoreload.start(ioloop)

ioloop.start()
self.write_message(json.dumps(state))

    def handle_command(self, message):
        # Default implementation ignores incoming commands.
        pass

    def check_origin(self, origin):
        # Accept websocket connections from any origin.
        return True


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=9342)
    parser.add_argument('--verbose', action='store_true', default=False)
    args = parser.parse_args()

    loop = IOLoop()
    port = args.port
    # Share settings and the loop with the handler class so handler
    # instances can schedule callbacks on it.
    FakeRobot.verbose = args.verbose
    FakeRobot.ioloop = loop
    app = Application([(r'/', FakeRobot)])
    app.listen(port)
    url = 'ws://{}:{}'.format('127.0.0.1', port)
    if args.verbose:
        print('Fake robot serving on {}'.format(url))
    loop.start()
class IOLoop(threading.Thread):
    """Thread wrapper around a Tornado ioloop.

    Futures registered via add_future() are queued in a class-level list
    and drained onto the real ioloop by a periodic callback running on
    the loop thread; quit() performs a deadline-bounded graceful stop.
    """

    # Pending (future, callback) pairs, drained by the loop thread.
    # NOTE(review): class-level mutable -- shared across instances.
    _futures = []
    loop_interval = 100  # ms
    loop_quit_wait = MAX_WAIT_SECONDS_BEFORE_SHUTDOWN  # second

    def __init__(self):
        threading.Thread.__init__(self)
        self.ioloop = TornadoIOLoop()

    def run(self):
        logger.debug('ioloop starting')

        def add_features():
            # Move queued (future, callback) pairs onto the real ioloop.
            if not self._futures:
                pass
            else:
                need_add = self._futures[:]
                self._futures = []
                for each in need_add:
                    self.ioloop.add_future(each[0], each[1])

        PeriodicCallback(add_features, self.loop_interval, self.ioloop).start()
        self.ioloop.start()

    def add_future(self, future, callback=None):
        # Queue a future; a no-op callback is substituted when none given.
        def nothing(future, **kwargs):
            pass
        if callback is None:
            callback = nothing
        # else:
        #     feature.add_done_callback(callback)
        self._futures.append((future, callback))

    def add_periodic(self, feature, interval=1000):
        # `feature` is called every `interval` ms on the wrapped loop.
        if self.ioloop._timeouts is None:
            self.ioloop._timeouts = []
        PeriodicCallback(feature, interval, self.ioloop).start()

    def add_timeout(self, deadline, callback, *args, **kwargs):
        self.ioloop.add_timeout(deadline, callback, *args, **kwargs)

    def time(self):
        return self.ioloop.time()

    def quit(self):
        # Thread-safe: schedule the shutdown sequence on the loop itself.
        logger.info('begin to quit')
        self.ioloop.add_callback(self._quit)

    def _quit(self):
        """
        :return:
        """
        logger.info('Will shutdown in %s seconds ...',
                    MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)
        io_loop = self.ioloop
        deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN

        def stop_loop():
            """
            :return:
            """
            # Re-arm until pending callbacks/timeouts drain or the deadline
            # passes, then stop and tear down the loop.
            now = time.time()
            step = 0.01
            if now < deadline and (io_loop._callbacks or len(io_loop._timeouts) > 1):
                # NOTE(review): max(now + step, deadline) jumps straight to
                # the deadline rather than polling every `step` seconds;
                # min() looks intended -- confirm.
                io_loop.add_timeout(max(now + step, deadline), stop_loop)
            else:
                io_loop.stop()
                io_loop.close()
                io_loop.clear_current()
                io_loop.clear_instance()
                logger.info('Shutdown')

        stop_loop()
class UIServer(threading.Thread):
    """Tornado-based web UI server thread for unmanic.

    Builds the web application (dashboard, history, plugins, settings, API
    routes), wires tornado's loggers to a rotating file handler, and runs a
    tornado IOLoop until :meth:`stop` is called.
    """

    config = None
    started = False
    io_loop = None
    server = None
    app = None

    def __init__(self, unmanic_data_queues, foreman, developer):
        """Store queues/foreman, configure logging, register the data-queue singleton.

        :param unmanic_data_queues: dict of inter-process queues; must contain
            "logging" and "inotifytasks" keys (used below).
        :param foreman: foreman object handed to UI request handlers.
        :param developer: bool-ish flag enabling debug log levels and autoreload.
        """
        super(UIServer, self).__init__(name='UIServer')
        self.config = config.CONFIG()
        self.developer = developer
        self.data_queues = unmanic_data_queues
        self.logger = unmanic_data_queues["logging"].get_logger(self.name)
        self.inotifytasks = unmanic_data_queues["inotifytasks"]
        # TODO: Move all logic out of template calling to foreman.
        #       Create methods here to handle the calls and rename to foreman
        self.foreman = foreman
        self.set_logging()
        # Add a singleton for handling the data queues for sending data to unmanic's other processes
        udq = UnmanicDataQueues()
        udq.set_unmanic_data_queues(unmanic_data_queues)

    def _log(self, message, message2='', level="info"):
        """Format and emit a log record at *level* via this thread's logger."""
        message = common.format_message(message, message2)
        getattr(self.logger, level)(message)

    def stop(self):
        """Mark the server stopped and ask the IOLoop (thread-safely) to exit."""
        if self.started:
            self.started = False
        if self.io_loop:
            # add_callback is the only thread-safe way to touch the loop
            self.io_loop.add_callback(self.io_loop.stop)

    def set_logging(self):
        """Route tornado's access/application/general loggers to a nightly-rotated file."""
        if self.config and self.config.get_log_path():
            # Create directory if not exists
            if not os.path.exists(self.config.get_log_path()):
                os.makedirs(self.config.get_log_path())

            # Create file handler
            log_file = os.path.join(self.config.get_log_path(), 'tornado.log')
            file_handler = logging.handlers.TimedRotatingFileHandler(log_file, when='midnight', interval=1,
                                                                     backupCount=7)
            file_handler.setLevel(logging.INFO)

            # Set tornado.access logging to file. Disable propagation of logs
            tornado_access = logging.getLogger("tornado.access")
            if self.developer:
                tornado_access.setLevel(logging.DEBUG)
            else:
                tornado_access.setLevel(logging.INFO)
            tornado_access.addHandler(file_handler)
            tornado_access.propagate = False

            # Set tornado.application logging to file. Enable propagation of logs
            tornado_application = logging.getLogger("tornado.application")
            if self.developer:
                tornado_application.setLevel(logging.DEBUG)
            else:
                tornado_application.setLevel(logging.INFO)
            tornado_application.addHandler(file_handler)
            tornado_application.propagate = True  # Send logs also to root logger (command line)

            # Set tornado.general logging to file. Enable propagation of logs
            tornado_general = logging.getLogger("tornado.general")
            if self.developer:
                tornado_general.setLevel(logging.DEBUG)
            else:
                tornado_general.setLevel(logging.INFO)
            tornado_general.addHandler(file_handler)
            tornado_general.propagate = True  # Send logs also to root logger (command line)

    def update_tornado_settings(self):
        """Adjust module-level tornado settings before the app is built."""
        # Check if this is a development environment or not
        if self.developer:
            tornado_settings['autoreload'] = True

    def run(self):
        """Thread entry point: build the app, bind the port, and run the IOLoop."""
        # Allow tornado to create/use an asyncio event loop on this non-main thread
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        self.started = True

        # Configure tornado server based on config
        self.update_tornado_settings()

        # Load the app
        self.app = self.make_web_app()

        # TODO: add support for HTTPS

        # Web Server
        self.server = HTTPServer(
            self.app,
            ssl_options=None,
        )
        try:
            self.server.listen(int(self.config.UI_PORT))
        except socket.error as e:
            self._log("Exception when setting WebUI port {}:".format(self.config.UI_PORT),
                      message2=str(e), level="warning")
            raise SystemExit

        # Fix: use the classmethod IOLoop.current() directly; the original
        # IOLoop().current() constructed a throwaway loop instance first.
        self.io_loop = IOLoop.current()
        self.io_loop.start()
        self.io_loop.close(True)

        self._log("Leaving UIServer loop...")

    def make_web_app(self):
        """Build and return the tornado Application with UI and API routes."""
        # Start with web application routes
        app = Application([
            (r"/assets/(.*)", StaticFileHandler, dict(path=tornado_settings['static_path'])),
            (r"/dashboard/(.*)", MainUIRequestHandler, dict(
                data_queues=self.data_queues,
                foreman=self.foreman,
            )),
            (r"/dashws", DashboardWebSocket, dict(
                data_queues=self.data_queues,
                foreman=self.foreman,
            )),
            (r"/history/(.*)", HistoryUIRequestHandler, dict(
                data_queues=self.data_queues,
            )),
            (r"/plugins/(.*)", PluginsUIRequestHandler, dict(
                data_queues=self.data_queues,
            )),
            (r"/settings/(.*)", SettingsUIRequestHandler, dict(
                data_queues=self.data_queues,
            )),
            (r"/filebrowser/(.*)", ElementFileBrowserUIRequestHandler, dict(
                data_queues=self.data_queues,
            )),
            (r"/(.*)", RedirectHandler, dict(
                url="/dashboard/"
            )),
        ], **tornado_settings)

        # Add API routes
        app.add_handlers(r'.*', [
            (PathMatches(r"/api/.*"), APIRequestRouter(app)),
        ])

        return app
class IOPubThread:
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object whose
    IO is always run in a thread.
    """

    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------
        socket : zmq.PUB Socket
            the socket on which messages will be sent.
        pipe : bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # remember the creating pid so forked children can be detected later
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        # the loop is not installed as "current" here; it is started
        # (and thereby bound) inside the dedicated thread in _thread_main
        self.io_loop = IOLoop(make_current=False)
        if pipe:
            self._setup_pipe_in()
        self._local = threading.local()
        # queue of callables to run on the IOPub thread, drained by _handle_event
        self._events: Deque[Callable[..., Any]] = deque()
        # one PUSH socket per writer thread, tracked weakly so GC closes them
        self._event_pipes: WeakSet[Any] = WeakSet()
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main, name="IOPub")
        self.thread.daemon = True
        # flags that tell pydev-based debuggers not to trace this thread
        self.thread.pydev_do_not_trace = True  # type:ignore[attr-defined]
        self.thread.is_pydev_daemon_thread = True  # type:ignore[attr-defined]
        self.thread.name = "IOPub"

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        # start() blocks until io_loop.stop() is called (see stop() below)
        self.io_loop.start()
        self.io_loop.close(all_fds=True)

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        # random inproc endpoint name, unique per IOPubThread instance
        _uuid = b2a_hex(os.urandom(16)).decode("ascii")
        iface = self._event_interface = "inproc://%s" % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
            # WeakSet so that event pipes will be closed by garbage collection
            # when their threads are terminated
            self._event_pipes.add(event_pipe)
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe

        Content of the message is ignored.

        Whenever *an* event arrives on the event stream,
        *all* waiting events are processed in order.
        """
        # freeze event count so new writes don't extend the queue
        # while we are processing
        n_events = len(self._events)
        for _ in range(n_events):
            event_f = self._events.popleft()
            event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context

        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)

        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0

        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            # best-effort: subprocess piping is disabled rather than fatal
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                          "\nsubprocess output will be unavailable.")
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        # first frame must carry the shared-secret uuid set in _setup_pipe_in
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000  # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        # True unless we are in a forked child of the creating process
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.name = "IOPub"
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        # stop() must run on the loop thread; add_callback is thread-safe
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # close *all* event pipes, created in any thread
        # event pipes can only be used from other threads while self.thread.is_alive()
        # so after thread.join, this should be safe
        for event_pipe in self._event_pipes:
            event_pipe.close()

    def close(self):
        # idempotent: safe to call after an earlier close
        if self.closed:
            return
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        # a None socket is the "closed" marker set by close()
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            self._events.append(f)
            # wake event thread (message content is ignored)
            self._event_pipe.send(b"")
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda: self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        if self.closed:
            return

        mp_mode = self._check_mp_mode()

        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
class WebServer(threading.Thread):
    """SiCKRAGE's tornado web-server thread.

    Builds the full route table (websocket, API v1/v2, static assets, and web
    handlers), constructs the tornado Application + HTTPServer, binds the
    configured port (optionally over HTTPS), and runs an IOLoop until
    :meth:`shutdown` is called.
    """

    def __init__(self):
        super(WebServer, self).__init__()
        self.name = "TORNADO"
        self.daemon = True
        self.started = False
        self.handlers = {}          # route-group name -> list of (regex, handler[, kwargs])
        self.video_root = None      # filesystem root used by the /videos/ static handler
        self.api_v1_root = None
        self.api_v2_root = None
        self.app = None
        self.server = None
        self.io_loop = None

    def run(self):
        """Thread entry point: assemble routes, bind the server, run the loop."""
        self.started = True
        self.io_loop = IOLoop()

        # load languages
        tornado.locale.load_gettext_translations(sickrage.LOCALE_DIR, 'messages')

        # Check configured web port is correct
        if sickrage.app.config.general.web_port < 21 or sickrage.app.config.general.web_port > 65535:
            sickrage.app.config.general.web_port = 8081

        # clear mako cache folder
        mako_cache = os.path.join(sickrage.app.cache_dir, 'mako')
        if os.path.isdir(mako_cache):
            shutil.rmtree(mako_cache, ignore_errors=True)

        # video root
        if sickrage.app.config.general.root_dirs:
            root_dirs = sickrage.app.config.general.root_dirs.split('|')
            # root_dirs[0] is the index of the default root dir within the list
            self.video_root = root_dirs[int(root_dirs[0]) + 1]

        # web root — normalize to a single leading slash, no trailing slash
        # Fix: original had a duplicated assignment target (`x = x = (...)`)
        if sickrage.app.config.general.web_root:
            sickrage.app.config.general.web_root = (
                '/' + sickrage.app.config.general.web_root.lstrip('/').strip('/'))

        # api root
        self.api_v1_root = fr'{sickrage.app.config.general.web_root}/api/(?:v1/)?{sickrage.app.config.general.api_v1_key}'
        self.api_v2_root = fr'{sickrage.app.config.general.web_root}/api/v2'

        # tornado setup
        if sickrage.app.config.general.enable_https:
            # If either the HTTPS certificate or key do not exist, make some self-signed ones.
            if not create_https_certificates(sickrage.app.config.general.https_cert,
                                             sickrage.app.config.general.https_key):
                sickrage.app.log.info("Unable to create CERT/KEY files, disabling HTTPS")
                sickrage.app.config.general.enable_https = False

            if not (os.path.exists(sickrage.app.config.general.https_cert) and
                    os.path.exists(sickrage.app.config.general.https_key)):
                sickrage.app.log.warning("Disabled HTTPS because of missing CERT and KEY files")
                sickrage.app.config.general.enable_https = False

        # Load templates
        mako_lookup = TemplateLookup(directories=[sickrage.app.gui_views_dir],
                                     module_directory=os.path.join(sickrage.app.cache_dir, 'mako'),
                                     filesystem_checks=True,
                                     strict_undefined=True,
                                     input_encoding='utf-8',
                                     output_encoding='utf-8',
                                     encoding_errors='replace')

        # Pre-compile every template under the views dir, keyed by its path
        # relative to the views dir
        templates = {}
        for root, dirs, files in os.walk(sickrage.app.gui_views_dir):
            path = root.split(os.sep)
            for x in sickrage.app.gui_views_dir.split(os.sep):
                if x in path:
                    del path[path.index(x)]
            for file in files:
                filename = '{}/{}'.format('/'.join(path), file).lstrip('/')
                templates[filename] = mako_lookup.get_template(filename)

        # Websocket handler
        self.handlers['websocket_handlers'] = [
            (fr'{sickrage.app.config.general.web_root}/ws/ui', WebSocketUIHandler)
        ]

        # API v1 Handlers
        self.handlers['api_v1_handlers'] = [
            # api
            (fr'{self.api_v1_root}(/?.*)', ApiHandler),

            # api builder
            (fr'{sickrage.app.config.general.web_root}/api/builder', RedirectHandler,
             {"url": sickrage.app.config.general.web_root + '/apibuilder/'}),
        ]

        # API v2 Handlers
        self.handlers['api_v2_handlers'] = [
            (fr'{self.api_v2_root}/ping', ApiPingHandler),
            (fr'{self.api_v2_root}/swagger.json', ApiSwaggerDotJsonHandler,
             {'api_handlers': 'api_v2_handlers', 'api_version': '2.0.0'}),
            (fr'{self.api_v2_root}/config', ApiV2ConfigHandler),
            (fr'{self.api_v2_root}/file-browser', ApiV2FileBrowserHandler),
            (fr'{self.api_v2_root}/postprocess', Apiv2PostProcessHandler),
            (fr'{self.api_v2_root}/retrieve-series-metadata', ApiV2RetrieveSeriesMetadataHandler),
            (fr'{self.api_v2_root}/schedule', ApiV2ScheduleHandler),
            (fr'{self.api_v2_root}/series-providers', ApiV2SeriesProvidersHandler),
            (fr'{self.api_v2_root}/series-providers/([a-z]+)/search', ApiV2SeriesProvidersSearchHandler),
            (fr'{self.api_v2_root}/series-providers/([a-z]+)/languages', ApiV2SeriesProvidersLanguagesHandler),
            (fr'{self.api_v2_root}/series', ApiV2SeriesHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)', ApiV2SeriesHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)/episodes', ApiV2SeriesEpisodesHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)/images', ApiV2SeriesImagesHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)/imdb-info', ApiV2SeriesImdbInfoHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)/blacklist', ApiV2SeriesBlacklistHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)/whitelist', ApiV2SeriesWhitelistHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)/refresh', ApiV2SeriesRefreshHandler),
            (fr'{self.api_v2_root}/series/(\d+[-][a-z]+)/update', ApiV2SeriesUpdateHandler),
            (fr'{self.api_v2_root}/episodes/rename', ApiV2EpisodesRenameHandler),
            (fr'{self.api_v2_root}/episodes/(\d+[-][a-z]+)/search', ApiV2EpisodesManualSearchHandler),
        ]

        # New UI Static File Handlers
        self.handlers['new_ui_static_file_handlers'] = [
            # media
            (fr'{sickrage.app.config.general.web_root}/app/static/media/(.*)', StaticImageHandler,
             {"path": os.path.join(sickrage.app.gui_app_dir, 'static', 'media')}),

            # css
            (fr'{sickrage.app.config.general.web_root}/app/static/css/(.*)', StaticNoCacheFileHandler,
             {"path": os.path.join(sickrage.app.gui_app_dir, 'static', 'css')}),

            # js
            (fr'{sickrage.app.config.general.web_root}/app/static/js/(.*)', StaticNoCacheFileHandler,
             {"path": os.path.join(sickrage.app.gui_app_dir, 'static', 'js')}),

            # base
            (fr"{sickrage.app.config.general.web_root}/app/(.*)", tornado.web.StaticFileHandler,
             {"path": sickrage.app.gui_app_dir, "default_filename": "index.html"})
        ]

        # Static File Handlers
        self.handlers['static_file_handlers'] = [
            # redirect to home
            (fr"({sickrage.app.config.general.web_root})(/?)", RedirectHandler,
             {"url": f"{sickrage.app.config.general.web_root}/home"}),

            # login
            (fr'{sickrage.app.config.general.web_root}/login(/?)', LoginHandler),

            # logout
            (fr'{sickrage.app.config.general.web_root}/logout(/?)', LogoutHandler),

            # favicon
            (fr'{sickrage.app.config.general.web_root}/(favicon\.ico)', StaticNoCacheFileHandler,
             {"path": os.path.join(sickrage.app.gui_static_dir, 'images/favicon.ico')}),

            # images
            (fr'{sickrage.app.config.general.web_root}/images/(.*)', StaticImageHandler,
             {"path": os.path.join(sickrage.app.gui_static_dir, 'images')}),

            # css
            (fr'{sickrage.app.config.general.web_root}/css/(.*)', StaticNoCacheFileHandler,
             {"path": os.path.join(sickrage.app.gui_static_dir, 'css')}),

            # scss
            (fr'{sickrage.app.config.general.web_root}/scss/(.*)', StaticNoCacheFileHandler,
             {"path": os.path.join(sickrage.app.gui_static_dir, 'scss')}),

            # fonts
            (fr'{sickrage.app.config.general.web_root}/fonts/(.*)', StaticNoCacheFileHandler,
             {"path": os.path.join(sickrage.app.gui_static_dir, 'fonts')}),

            # javascript
            (fr'{sickrage.app.config.general.web_root}/js/(.*)', StaticNoCacheFileHandler,
             {"path": os.path.join(sickrage.app.gui_static_dir, 'js')}),

            # videos
            (fr'{sickrage.app.config.general.web_root}/videos/(.*)', StaticNoCacheFileHandler,
             {"path": self.video_root}),
        ]

        # Handlers
        self.handlers['web_handlers'] = [
            (fr'{sickrage.app.config.general.web_root}/robots.txt', RobotsDotTxtHandler),
            (fr'{sickrage.app.config.general.web_root}/messages.po', MessagesDotPoHandler),
            (fr'{sickrage.app.config.general.web_root}/quicksearch.json', QuicksearchDotJsonHandler),
            (fr'{sickrage.app.config.general.web_root}/apibuilder(/?)', APIBulderHandler),
            (fr'{sickrage.app.config.general.web_root}/setHomeLayout(/?)', SetHomeLayoutHandler),
            (fr'{sickrage.app.config.general.web_root}/setPosterSortBy(/?)', SetPosterSortByHandler),
            (fr'{sickrage.app.config.general.web_root}/setPosterSortDir(/?)', SetPosterSortDirHandler),
            (fr'{sickrage.app.config.general.web_root}/setHistoryLayout(/?)', SetHistoryLayoutHandler),
            (fr'{sickrage.app.config.general.web_root}/toggleDisplayShowSpecials(/?)', ToggleDisplayShowSpecialsHandler),
            (fr'{sickrage.app.config.general.web_root}/toggleScheduleDisplayPaused(/?)', ToggleScheduleDisplayPausedHandler),
            (fr'{sickrage.app.config.general.web_root}/setScheduleSort(/?)', SetScheduleSortHandler),
            (fr'{sickrage.app.config.general.web_root}/forceSchedulerJob(/?)', ForceSchedulerJobHandler),
            (fr'{sickrage.app.config.general.web_root}/announcements(/?)', AnnouncementsHandler),
            (fr'{sickrage.app.config.general.web_root}/announcements/announcementCount(/?)', AnnouncementCountHandler),
            (fr'{sickrage.app.config.general.web_root}/announcements/mark-seen(/?)', MarkAnnouncementSeenHandler),
            (fr'{sickrage.app.config.general.web_root}/schedule(/?)', ScheduleHandler),
            (fr'{sickrage.app.config.general.web_root}/setScheduleLayout(/?)', SetScheduleLayoutHandler),
            (fr'{sickrage.app.config.general.web_root}/calendar(/?)', CalendarHandler),
            (fr'{sickrage.app.config.general.web_root}/changelog(/?)', ChangelogHandler),
            (fr'{sickrage.app.config.general.web_root}/account/link(/?)', AccountLinkHandler),
            (fr'{sickrage.app.config.general.web_root}/account/unlink(/?)', AccountUnlinkHandler),
            (fr'{sickrage.app.config.general.web_root}/account/is-linked(/?)', AccountIsLinkedHandler),
            (fr'{sickrage.app.config.general.web_root}/history(/?)', HistoryHandler),
            (fr'{sickrage.app.config.general.web_root}/history/clear(/?)', HistoryClearHandler),
            (fr'{sickrage.app.config.general.web_root}/history/trim(/?)', HistoryTrimHandler),
            (fr'{sickrage.app.config.general.web_root}/logs(/?)', LogsHandler),
            (fr'{sickrage.app.config.general.web_root}/logs/errorCount(/?)', ErrorCountHandler),
            (fr'{sickrage.app.config.general.web_root}/logs/warningCount(/?)', WarningCountHandler),
            (fr'{sickrage.app.config.general.web_root}/logs/view(/?)', LogsViewHandler),
            (fr'{sickrage.app.config.general.web_root}/logs/clearAll(/?)', LogsClearAllHanlder),
            (fr'{sickrage.app.config.general.web_root}/logs/clearWarnings(/?)', LogsClearWarningsHanlder),
            (fr'{sickrage.app.config.general.web_root}/logs/clearErrors(/?)', LogsClearErrorsHanlder),
            (fr'{sickrage.app.config.general.web_root}/browser(/?)', WebFileBrowserHandler),
            (fr'{sickrage.app.config.general.web_root}/browser/complete(/?)', WebFileBrowserCompleteHandler),
            (fr'{sickrage.app.config.general.web_root}/home(/?)', HomeHandler),
            (fr'{sickrage.app.config.general.web_root}/home/showProgress(/?)', ShowProgressHandler),
            (fr'{sickrage.app.config.general.web_root}/home/is-alive(/?)', IsAliveHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testSABnzbd(/?)', TestSABnzbdHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testSynologyDSM(/?)', TestSynologyDSMHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testTorrent(/?)', TestTorrentHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testFreeMobile(/?)', TestFreeMobileHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testTelegram(/?)', TestTelegramHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testJoin(/?)', TestJoinHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testGrowl(/?)', TestGrowlHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testProwl(/?)', TestProwlHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testBoxcar2(/?)', TestBoxcar2Handler),
            (fr'{sickrage.app.config.general.web_root}/home/testPushover(/?)', TestPushoverHandler),
            (fr'{sickrage.app.config.general.web_root}/home/twitterStep1(/?)', TwitterStep1Handler),
            (fr'{sickrage.app.config.general.web_root}/home/twitterStep2(/?)', TwitterStep2Handler),
            (fr'{sickrage.app.config.general.web_root}/home/testTwitter(/?)', TestTwitterHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testTwilio(/?)', TestTwilioHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testSlack(/?)', TestSlackHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testAlexa(/?)', TestAlexaHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testDiscord(/?)', TestDiscordHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testKODI(/?)', TestKODIHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testPMC(/?)', TestPMCHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testPMS(/?)', TestPMSHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testLibnotify(/?)', TestLibnotifyHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testEMBY(/?)', TestEMBYHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testNMJ(/?)', TestNMJHandler),
            (fr'{sickrage.app.config.general.web_root}/home/settingsNMJ(/?)', SettingsNMJHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testNMJv2(/?)', TestNMJv2Handler),
            (fr'{sickrage.app.config.general.web_root}/home/settingsNMJv2(/?)', SettingsNMJv2Handler),
            (fr'{sickrage.app.config.general.web_root}/home/getTraktToken(/?)', GetTraktTokenHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testTrakt(/?)', TestTraktHandler),
            (fr'{sickrage.app.config.general.web_root}/home/loadShowNotifyLists(/?)', LoadShowNotifyListsHandler),
            (fr'{sickrage.app.config.general.web_root}/home/saveShowNotifyList(/?)', SaveShowNotifyListHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testEmail(/?)', TestEmailHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testNMA(/?)', TestNMAHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testPushalot(/?)', TestPushalotHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testPushbullet(/?)', TestPushbulletHandler),
            (fr'{sickrage.app.config.general.web_root}/home/getPushbulletDevices(/?)', GetPushbulletDevicesHandler),
            (fr'{sickrage.app.config.general.web_root}/home/serverStatus(/?)', ServerStatusHandler),
            (fr'{sickrage.app.config.general.web_root}/home/providerStatus(/?)', ProviderStatusHandler),
            (fr'{sickrage.app.config.general.web_root}/home/shutdown(/?)', ShutdownHandler),
            (fr'{sickrage.app.config.general.web_root}/home/restart(/?)', RestartHandler),
            (fr'{sickrage.app.config.general.web_root}/home/updateCheck(/?)', UpdateCheckHandler),
            (fr'{sickrage.app.config.general.web_root}/home/update(/?)', UpdateHandler),
            (fr'{sickrage.app.config.general.web_root}/home/verifyPath(/?)', VerifyPathHandler),
            (fr'{sickrage.app.config.general.web_root}/home/installRequirements(/?)', InstallRequirementsHandler),
            (fr'{sickrage.app.config.general.web_root}/home/branchCheckout(/?)', BranchCheckoutHandler),
            (fr'{sickrage.app.config.general.web_root}/home/displayShow(/?)', DisplayShowHandler),
            (fr'{sickrage.app.config.general.web_root}/home/togglePause(/?)', TogglePauseHandler),
            (fr'{sickrage.app.config.general.web_root}/home/deleteShow', DeleteShowHandler),
            (fr'{sickrage.app.config.general.web_root}/home/refreshShow(/?)', RefreshShowHandler),
            (fr'{sickrage.app.config.general.web_root}/home/updateShow(/?)', UpdateShowHandler),
            (fr'{sickrage.app.config.general.web_root}/home/subtitleShow(/?)', SubtitleShowHandler),
            (fr'{sickrage.app.config.general.web_root}/home/updateKODI(/?)', UpdateKODIHandler),
            (fr'{sickrage.app.config.general.web_root}/home/updatePLEX(/?)', UpdatePLEXHandler),
            (fr'{sickrage.app.config.general.web_root}/home/updateEMBY(/?)', UpdateEMBYHandler),
            (fr'{sickrage.app.config.general.web_root}/home/syncTrakt(/?)', SyncTraktHandler),
            (fr'{sickrage.app.config.general.web_root}/home/deleteEpisode(/?)', DeleteEpisodeHandler),
            (fr'{sickrage.app.config.general.web_root}/home/testRename(/?)', TestRenameHandler),
            (fr'{sickrage.app.config.general.web_root}/home/doRename(/?)', DoRenameHandler),
            (fr'{sickrage.app.config.general.web_root}/home/searchEpisode(/?)', SearchEpisodeHandler),
            (fr'{sickrage.app.config.general.web_root}/home/getManualSearchStatus(/?)', GetManualSearchStatusHandler),
            (fr'{sickrage.app.config.general.web_root}/home/searchEpisodeSubtitles(/?)', SearchEpisodeSubtitlesHandler),
            (fr'{sickrage.app.config.general.web_root}/home/setSceneNumbering(/?)', SetSceneNumberingHandler),
            (fr'{sickrage.app.config.general.web_root}/home/retryEpisode(/?)', RetryEpisodeHandler),
            (fr'{sickrage.app.config.general.web_root}/home/fetch_releasegroups(/?)', FetchReleasegroupsHandler),
            (fr'{sickrage.app.config.general.web_root}/home/postprocess(/?)', HomePostProcessHandler),
            (fr'{sickrage.app.config.general.web_root}/home/postprocess/processEpisode(/?)', HomeProcessEpisodeHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows(/?)', HomeAddShowsHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/searchSeriesProviderForShowName(/?)', SearchSeriesProviderForShowNameHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/massAddTable(/?)', MassAddTableHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/newShow(/?)', NewShowHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/traktShows(/?)', TraktShowsHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/popularShows(/?)', PopularShowsHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/addShowToBlacklist(/?)', AddShowToBlacklistHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/existingShows(/?)', ExistingShowsHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/addShowByID(/?)', AddShowByIDHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/addNewShow(/?)', AddNewShowHandler),
            (fr'{sickrage.app.config.general.web_root}/home/addShows/addExistingShows(/?)', AddExistingShowsHandler),
            (fr'{sickrage.app.config.general.web_root}/manage(/?)', ManageHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/editShow(/?)', EditShowHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/showEpisodeStatuses(/?)', ShowEpisodeStatusesHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/episodeStatuses(/?)', EpisodeStatusesHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/changeEpisodeStatuses(/?)', ChangeEpisodeStatusesHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/setEpisodeStatus(/?)', SetEpisodeStatusHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/showSubtitleMissed(/?)', ShowSubtitleMissedHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/subtitleMissed(/?)', SubtitleMissedHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/downloadSubtitleMissed(/?)', DownloadSubtitleMissedHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/backlogShow(/?)', BacklogShowHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/backlogOverview(/?)', BacklogOverviewHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/massEdit(/?)', MassEditHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/massUpdate(/?)', MassUpdateHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/failedDownloads(/?)', FailedDownloadsHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/manageQueues(/?)', ManageQueuesHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/manageQueues/forceBacklogSearch(/?)', ForceBacklogSearchHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/manageQueues/forceDailySearch(/?)', ForceDailySearchHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/manageQueues/forceFindPropers(/?)', ForceFindPropersHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/manageQueues/pauseDailySearcher(/?)', PauseDailySearcherHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/manageQueues/pauseBacklogSearcher(/?)', PauseBacklogSearcherHandler),
            (fr'{sickrage.app.config.general.web_root}/manage/manageQueues/pausePostProcessor(/?)', PausePostProcessorHandler),
            (fr'{sickrage.app.config.general.web_root}/config(/?)', ConfigWebHandler),
            (fr'{sickrage.app.config.general.web_root}/config/reset(/?)', ConfigResetHandler),
            (fr'{sickrage.app.config.general.web_root}/config/anime(/?)', ConfigAnimeHandler),
            (fr'{sickrage.app.config.general.web_root}/config/anime/saveAnime(/?)', ConfigSaveAnimeHandler),
            (fr'{sickrage.app.config.general.web_root}/config/backuprestore(/?)', ConfigBackupRestoreHandler),
            (fr'{sickrage.app.config.general.web_root}/config/backuprestore/backup(/?)', ConfigBackupHandler),
            (fr'{sickrage.app.config.general.web_root}/config/backuprestore/restore(/?)', ConfigRestoreHandler),
            (fr'{sickrage.app.config.general.web_root}/config/backuprestore/saveBackupRestore(/?)', SaveBackupRestoreHandler),
            (fr'{sickrage.app.config.general.web_root}/config/general(/?)', ConfigGeneralHandler),
            (fr'{sickrage.app.config.general.web_root}/config/general/generateApiKey(/?)', GenerateApiKeyHandler),
            (fr'{sickrage.app.config.general.web_root}/config/general/saveRootDirs(/?)', SaveRootDirsHandler),
            (fr'{sickrage.app.config.general.web_root}/config/general/saveAddShowDefaults(/?)', SaveAddShowDefaultsHandler),
            (fr'{sickrage.app.config.general.web_root}/config/general/saveGeneral(/?)', SaveGeneralHandler),
            (fr'{sickrage.app.config.general.web_root}/config/notifications(/?)', ConfigNotificationsHandler),
            (fr'{sickrage.app.config.general.web_root}/config/notifications/saveNotifications(/?)', SaveNotificationsHandler),
            (fr'{sickrage.app.config.general.web_root}/config/postProcessing(/?)', ConfigPostProcessingHandler),
            (fr'{sickrage.app.config.general.web_root}/config/postProcessing/savePostProcessing(/?)', SavePostProcessingHandler),
            (fr'{sickrage.app.config.general.web_root}/config/postProcessing/testNaming(/?)', TestNamingHandler),
            (fr'{sickrage.app.config.general.web_root}/config/postProcessing/isNamingValid(/?)', IsNamingPatternValidHandler),
            (fr'{sickrage.app.config.general.web_root}/config/postProcessing/isRarSupported(/?)', IsRarSupportedHandler),
            (fr'{sickrage.app.config.general.web_root}/config/providers(/?)', ConfigProvidersHandler),
            (fr'{sickrage.app.config.general.web_root}/config/providers/canAddNewznabProvider(/?)', CanAddNewznabProviderHandler),
            (fr'{sickrage.app.config.general.web_root}/config/providers/canAddTorrentRssProvider(/?)', CanAddTorrentRssProviderHandler),
            (fr'{sickrage.app.config.general.web_root}/config/providers/getNewznabCategories(/?)', GetNewznabCategoriesHandler),
            (fr'{sickrage.app.config.general.web_root}/config/providers/saveProviders(/?)', SaveProvidersHandler),
            (fr'{sickrage.app.config.general.web_root}/config/qualitySettings(/?)', ConfigQualitySettingsHandler),
            (fr'{sickrage.app.config.general.web_root}/config/qualitySettings/saveQualities(/?)', SaveQualitiesHandler),
            (fr'{sickrage.app.config.general.web_root}/config/search(/?)', ConfigSearchHandler),
            (fr'{sickrage.app.config.general.web_root}/config/search/saveSearch(/?)', SaveSearchHandler),
            (fr'{sickrage.app.config.general.web_root}/config/subtitles(/?)', ConfigSubtitlesHandler),
            (fr'{sickrage.app.config.general.web_root}/config/subtitles/get_code(/?)', ConfigSubtitleGetCodeHandler),
            (fr'{sickrage.app.config.general.web_root}/config/subtitles/wanted_languages(/?)', ConfigSubtitlesWantedLanguagesHandler),
            (fr'{sickrage.app.config.general.web_root}/config/subtitles/saveSubtitles(/?)', SaveSubtitlesHandler),
        ]

        # Initialize Tornado application
        self.app = Application(handlers=sum(self.handlers.values(), []),
                               debug=True,
                               autoreload=False,
                               gzip=sickrage.app.config.general.web_use_gzip,
                               cookie_secret=sickrage.app.config.general.web_cookie_secret,
                               login_url='%s/login/' % sickrage.app.config.general.web_root,
                               templates=templates,
                               default_handler_class=NotFoundHandler)

        # HTTPS Cert/Key object
        ssl_ctx = None
        if sickrage.app.config.general.enable_https:
            ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ssl_ctx.load_cert_chain(sickrage.app.config.general.https_cert,
                                    sickrage.app.config.general.https_key)

        # Web Server
        self.server = HTTPServer(self.app,
                                 ssl_options=ssl_ctx,
                                 xheaders=sickrage.app.config.general.handle_reverse_proxy)

        try:
            self.server.listen(sickrage.app.config.general.web_port, sickrage.app.web_host)
        except socket.error as e:
            sickrage.app.log.warning(e.strerror)
            raise SystemExit

        # launch browser window
        if not sickrage.app.no_launch and sickrage.app.config.general.launch_browser:
            sickrage.app.scheduler.add_job(
                launch_browser,
                args=[('http', 'https')[sickrage.app.config.general.enable_https],
                      (get_internal_ip(), sickrage.app.web_host)[sickrage.app.web_host != ''],
                      sickrage.app.config.general.web_port])

        sickrage.app.log.info("SiCKRAGE :: STARTED")
        sickrage.app.log.info(f"SiCKRAGE :: APP VERSION:[{sickrage.version()}]")
        sickrage.app.log.info(f"SiCKRAGE :: CONFIG VERSION:[v{sickrage.app.config.db.version}]")
        sickrage.app.log.info(f"SiCKRAGE :: DATABASE VERSION:[v{sickrage.app.main_db.version}]")
        sickrage.app.log.info(f"SiCKRAGE :: DATABASE TYPE:[{sickrage.app.db_type}]")
        sickrage.app.log.info(
            f"SiCKRAGE :: URL:[{('http', 'https')[sickrage.app.config.general.enable_https]}://{(get_internal_ip(), sickrage.app.web_host)[sickrage.app.web_host != '']}:{sickrage.app.config.general.web_port}/{sickrage.app.config.general.web_root}]"
        )

        # Blocks until shutdown() stops the loop
        self.io_loop.start()

    def shutdown(self):
        """Stop accepting connections, drain the server, and stop the IOLoop."""
        if self.started:
            self.started = False
            if self.server:
                self.server.close_all_connections()
                self.server.stop()
            if self.io_loop:
                self.io_loop.stop()
def run(self):
    """Create a dedicated IOLoop, kick off the heart-beat feeder, and block on the loop."""
    loop = IOLoop()
    # Start the periodic heart-beat before the loop begins serving callbacks.
    dataFeeder.heart_beat()
    print("heatBeat thread", id(loop))
    # Blocks here until the loop is stopped elsewhere.
    loop.start()
class TornadoOctopus(object):
    """Concurrent HTTP fetcher built on tornado's IOLoop.

    URLs are enqueued with a handler callback; up to ``concurrency`` requests
    run at once and the rest wait in ``url_queue``.  Responses may be cached
    and fetch rate may be bounded by an optional ``limiter``.
    """

    def __init__(self, concurrency=10, auto_start=False, cache=False,
                 expiration_in_seconds=30, request_timeout_in_seconds=10,
                 connect_timeout_in_seconds=5, ignore_pycurl=False,
                 limiter=None, allow_connection_reuse=True):
        """
        :param concurrency: max number of in-flight requests.
        :param auto_start: create the IOLoop/http client immediately.
        :param cache: if True, responses are memoized in ``response_cache``.
        :param expiration_in_seconds: cache entry TTL.
        :param ignore_pycurl: force tornado's simple http client even if
            pycurl is importable.
        :param limiter: optional rate limiter with acquire/release/
            publish_lock_miss methods.
        :param allow_connection_reuse: only honored with the curl client;
            the simple client always reuses connections.
        """
        self.concurrency = concurrency
        self.auto_start = auto_start
        self.last_timeout = None
        self.cache = cache
        self.response_cache = Cache(expiration_in_seconds=expiration_in_seconds)
        self.request_timeout_in_seconds = request_timeout_in_seconds
        self.connect_timeout_in_seconds = connect_timeout_in_seconds
        self.ignore_pycurl = ignore_pycurl
        self.running_urls = 0
        self.url_queue = []

        if PYCURL_AVAILABLE and not self.ignore_pycurl:
            logging.debug(
                'pycurl is available, thus Octopus will be using it instead of tornado\'s simple http client.'
            )
            AsyncHTTPClient.configure(
                "tornado.curl_httpclient.CurlAsyncHTTPClient")
            self.allow_connection_reuse = allow_connection_reuse
        else:
            # The simple client cannot disable connection reuse.
            self.allow_connection_reuse = True

        if auto_start:
            logging.debug('Auto starting...')
            self.start()

        self.limiter = limiter

    @property
    def queue_size(self):
        """Number of URLs still waiting to be fetched."""
        return len(self.url_queue)

    @property
    def is_empty(self):
        """True when no URLs are waiting in the queue."""
        return self.queue_size == 0

    def start(self):
        """Create the IOLoop and the async http client bound to it."""
        logging.debug('Creating IOLoop and http_client.')
        self.ioloop = IOLoop()
        self.http_client = AsyncHTTPClient(io_loop=self.ioloop)

    @classmethod
    def from_tornado_response(cls, url, response):
        """Convert a tornado HTTPResponse into an Octopus Response.

        Cookies are recovered from the *request* 'Cookie' header.
        """
        cookies = response.request.headers.get('Cookie', '')
        if cookies:
            # BUGFIX: split on the first '=' only — cookie values may
            # legally contain '=' (e.g. base64 tokens), and the standard
            # '; ' separator used to leave a leading space in the key.
            cookies = dict(
                cookie.strip().split('=', 1)
                for cookie in cookies.split(';'))
        return Response(
            url=url,
            status_code=response.code,
            headers=dict(
                [(key, value) for key, value in response.headers.items()]),
            cookies=cookies,
            text=response.body,
            effective_url=response.effective_url,
            error=response.error and str(response.error) or None,
            request_time=response.request_time)

    def enqueue(self, url, handler, method='GET', **kw):
        """Queue *url* for fetching; ``handler(url, response)`` is called on completion."""
        logging.debug('Enqueueing %s...' % url)

        if self.cache:
            response = self.response_cache.get(url)
            if response is not None:
                logging.debug('Cache hit on %s.' % url)
                handler(url, response)
                return

        if self.running_urls < self.concurrency:
            logging.debug('Queue has space available for fetching %s.' % url)
            self.get_next_url(url, handler, method, **kw)
        else:
            logging.debug(
                'Queue is full. Enqueueing %s for future fetch.' % url)
            self.url_queue.append((url, handler, method, kw))

    def fetch(self, url, handler, method, **kw):
        """Issue the actual async HTTP request for *url*."""
        self.running_urls += 1

        if self.cache:
            response = self.response_cache.get(url)
            if response is not None:
                logging.debug('Cache hit on %s.' % url)
                self.running_urls -= 1
                handler(url, response)
                return

        logging.info('Fetching %s...' % url)
        request = HTTPRequest(
            url=url,
            method=method,
            connect_timeout=self.connect_timeout_in_seconds,
            request_timeout=self.request_timeout_in_seconds,
            prepare_curl_callback=self.handle_curl_callback,
            **kw)
        self.http_client.fetch(request, self.handle_request(url, handler))

    def handle_curl_callback(self, curl):
        # Force a fresh connection per request when reuse is disabled.
        if not self.allow_connection_reuse:
            curl.setopt(pycurl.FRESH_CONNECT, 1)

    def get_next_url(self, request_url=None, handler=None, method=None, **kw):
        """Fetch *request_url*, or pop the next pending entry off the queue."""
        if request_url is None:
            if not self.url_queue:
                return
            # NOTE(review): pop() takes the most recently queued URL (LIFO);
            # preserved as-is since callers may rely on this order.
            request_url, handler, method, kw = self.url_queue.pop()
        self.fetch_next_url(request_url, handler, method, **kw)

    def fetch_next_url(self, request_url, handler, method, **kw):
        """Fetch *request_url* unless the limiter denies it; returns True on fetch."""
        if self.limiter and not self.limiter.acquire(request_url):
            logging.info(
                'Could not acquire limit for url "%s".' % request_url)
            # Requeue and retry after the limiter's miss timeout.
            self.url_queue.append((request_url, handler, method, kw))
            deadline = timedelta(
                seconds=self.limiter.limiter_miss_timeout_ms / 1000.0)
            self.ioloop.add_timeout(deadline, self.get_next_url)
            self.limiter.publish_lock_miss(request_url)
            return False
        logging.debug(
            'Queue has space available for fetching %s.' % request_url)
        self.fetch(request_url, handler, method, **kw)
        return True

    def handle_request(self, url, callback):
        """Build the tornado completion callback for *url*."""

        def handle(response):
            logging.debug('Handler called for url %s...' % url)
            self.running_urls -= 1
            response = self.from_tornado_response(url, response)
            logging.info('Got response(%s) from %s.' % (
                response.status_code, url))

            # Only cache successful (non-error) responses.
            if self.cache and response and response.status_code < 399:
                logging.debug('Putting %s into cache.' % url)
                self.response_cache.put(url, response)

            if self.limiter:
                self.limiter.release(url)

            # User callbacks must not kill the loop.
            try:
                callback(url, response)
            except Exception:
                logging.exception('Error calling callback for %s.' % url)

            if self.running_urls < self.concurrency and self.url_queue:
                self.get_next_url()

            logging.debug(
                'Getting %d urls and still have %d more urls to get...' % (
                    self.running_urls, self.remaining_requests))

            # All done: stop the loop so wait() can return.
            if self.running_urls < 1 and self.remaining_requests == 0:
                logging.debug('Nothing else to get. Stopping Octopus...')
                self.stop()

        return handle

    def handle_wait_timeout(self, signal_number, frames):
        """Signal handler fired when wait()'s timeout elapses."""
        logging.debug(
            'Timeout waiting for IOLoop to finish. Stopping IOLoop manually.')
        self.stop(force=True)

    def wait(self, timeout=10):
        """Run the IOLoop until every URL is fetched, or *timeout* seconds pass.

        A falsy *timeout* waits indefinitely.
        """
        self.last_timeout = timeout
        if not self.url_queue and not self.running_urls:
            logging.debug('No urls to wait for. Returning immediately.')
            return

        if timeout:
            logging.debug(
                'Waiting for urls to be retrieved for %s seconds.' % timeout)
            self.ioloop.set_blocking_signal_threshold(
                timeout, self.handle_wait_timeout)
        else:
            logging.debug('Waiting for urls to be retrieved.')

        logging.info('Starting IOLoop with %d URLs still left to process.' %
                     self.remaining_requests)
        self.ioloop.start()

    @property
    def remaining_requests(self):
        """Count of URLs not yet handed to the http client."""
        return len(self.url_queue)

    def stop(self, force=False):
        """Stop the IOLoop (pending queue entries are left untouched)."""
        logging.info('Stopping IOLoop with %d URLs still left to process.' %
                     self.remaining_requests)
        self.ioloop.stop()