def write_error(self, status_code, **kwargs):
    if self._output_array:
        self._output = []
    else:
        self._output = {}
    if "exc_info" in kwargs:
        exc = kwargs['exc_info'][1]
        # Restart DB on a connection error if that's what we're handling
        if isinstance(exc, (psycopg2.OperationalError, psycopg2.InterfaceError)):
            try:
                db.close()
                db.connect()
                self.append("error", { "code": 500, "tl_key": "db_error_retry", "text": self.locale.translate("db_error_retry") })
            except Exception:
                self.append("error", { "code": 500, "tl_key": "db_error_permanent", "text": self.locale.translate("db_error_permanent") })
        elif isinstance(exc, APIException):
            exc.localize(self.locale)
            self.append(self.return_name, exc.jsonable())
        elif isinstance(exc, SongNonExistent):
            self.append("error", { "code": status_code, "tl_key": "song_does_not_exist", "text": self.locale.translate("song_does_not_exist") })
        else:
            self.append("error", { "code": status_code, "tl_key": "internal_error", "text": repr(exc) })
            self.append("traceback", { "traceback": traceback.format_exception(kwargs['exc_info'][0], kwargs['exc_info'][1], kwargs['exc_info'][2]) })
    else:
        self.append("error", { "tl_key": "internal_error", "text": self.locale.translate("internal_error") })
    self.finish()
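# Illustrative sketch (not part of the original source): Tornado calls
# RequestHandler.write_error() when a handler raises, passing the active
# exception as kwargs["exc_info"], which is what the override above inspects.
# The handler name and error payload below are hypothetical.
import tornado.web

class FailingHandler(tornado.web.RequestHandler):
    def get(self):
        # Any uncaught exception ends up in write_error via send_error().
        raise tornado.web.HTTPError(503, reason="example failure")

    def write_error(self, status_code, **kwargs):
        exc = kwargs["exc_info"][1] if "exc_info" in kwargs else None
        self.finish({"code": status_code, "text": repr(exc)})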
def get(self, sid):
    self.success = False
    self.sid = None
    if int(sid) in config.station_ids:
        self.sid = int(sid)
    else:
        return
    try:
        schedule.advance_station(self.sid)
    except psycopg2.extensions.TransactionRollbackError as e:
        if not self.retried:
            self.retried = True
            log.warn("backend", "Database transaction deadlock. Re-opening database and setting retry timeout.")
            db.close()
            db.open()
            tornado.ioloop.IOLoop.instance().add_timeout(datetime.timedelta(milliseconds=350), self.get)
        else:
            raise
    if not config.get("liquidsoap_annotations"):
        self.write(schedule.get_current_file(self.sid))
    else:
        self.write(self._get_annotated(schedule.get_current_event(self.sid)))
    self.success = True
def _listen(self, task_id):
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id
    # Log according to configured directory and port
    # we're operating on
    log_file = "%s/rw_api_%s.log" % (config.get("api_log_dir"), port_no)
    if config.test_mode and os.path.exists(log_file):
        os.remove(log_file)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.open()
    cache.open()
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)
    # If we're not in developer, remove development-related URLs
    if not config.get("developer_mode"):
        i = 0
        while i < len(request_classes):
            if request_classes[i][0].find("/test/") != -1:
                request_classes.pop(i)
                i = i - 1
            i = i + 1
    # Make sure all other errors get handled in an API-friendly way
    request_classes.append((r".*", api.web.Error404Handler))
    # Initialize the help (rather than it scan all URL handlers every time someone hits it)
    api.help.sectionize_requests()
    # Initialize playlist variables
    playlist.prepare_cooldown_algorithm(sid)
    # Fire ze missiles!
    app = tornado.web.Application(
        request_classes,
        debug=(config.test_mode or config.get("developer_mode")),
        template_path=os.path.join(os.path.dirname(__file__), "../templates"),
        static_path=os.path.join(os.path.dirname(__file__), "../static"),
        autoescape=None)
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(port_no)
    if config.get("api_user") and config.get("api_group"):
        chuser.change_user(config.get("api_user"), config.get("api_group"))
    for request in request_classes:
        log.debug("start", " Handler: %s" % str(request))
    log.info("start", "API server bootstrapped and ready to go.")
    self.ioloop = tornado.ioloop.IOLoop.instance()
    try:
        self.ioloop.start()
    finally:
        self.ioloop.stop()
        http_server.stop()
        db.close()
        log.info("stop", "Server has been shutdown.")
        log.close()
def start():
    db.open()
    cache.open()
    if config.test_mode:
        playlist.remove_all_locks(1)
    app = tornado.web.Application([
        (r"/advance/([0-9]+)", AdvanceScheduleRequest),
        (r"/refresh/([0-9]+)", RefreshScheduleRequest)
    ], debug=(config.test_mode or config.get("developer_mode")))
    server = tornado.httpserver.HTTPServer(app)
    server.listen(int(config.get("backend_port")), address='127.0.0.1')
    if config.get("backend_user") or config.get("backend_group"):
        chuser.change_user(config.get("backend_user"), config.get("backend_group"))
    pid = os.getpid()
    pidfile = open(config.get("backend_pid_file"), 'w')
    pidfile.write(str(pid))
    pidfile.close()
    schedule.load()
    log.debug("start", "Backend server bootstrapped, port %s, ready to go." % int(config.get("backend_port")))
    for sid in config.station_ids:
        playlist.prepare_cooldown_algorithm(sid)
    try:
        tornado.ioloop.IOLoop.instance().start()
    finally:
        db.close()
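# Illustrative sketch (not part of the original source): the backend above only
# listens on 127.0.0.1 at the configured "backend_port", so the stream pipeline
# or a test can fetch the next file by hitting /advance/<station id>. The
# function name and default port below are hypothetical.
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2

def fetch_next_file(sid, backend_port=20000):
    # Returns the file path (or annotated string) for the next song on station `sid`.
    response = urlopen("http://127.0.0.1:%s/advance/%s" % (backend_port, sid))
    return response.read().decode("utf-8")

print(fetch_next_file(1))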
def _handle_event(self, event):
    try:
        if hasattr(event, "src_path") and event.src_path and check_file_is_in_directory(event.src_path, self.root_directory):
            if _is_bad_extension(event.src_path):
                pass
            elif not os.path.isdir(event.src_path):
                log.debug("scan_event", "%s src_path for file %s" % (event.event_type, event.src_path))
                if _is_image(event.src_path) and (event.event_type == 'deleted' or event.event_type == 'moved'):
                    pass
                else:
                    self._handle_file(event.src_path)
            else:
                log.debug("scan_event", "%s src_path for dir %s" % (event.event_type, event.src_path))
                self._handle_directory(event.src_path)
        if hasattr(event, "dest_path") and event.dest_path and check_file_is_in_directory(event.dest_path, self.root_directory):
            if _is_bad_extension(event.dest_path):
                pass
            elif not os.path.isdir(event.dest_path):
                log.debug("scan_event", "%s dest_path for file %s" % (event.event_type, event.dest_path))
                if _is_image(event.dest_path) and (event.event_type == 'deleted'):
                    pass
                else:
                    self._handle_file(event.dest_path)
            else:
                log.debug("scan_event", "%s dest_path for dir %s" % (event.event_type, event.dest_path))
                self._handle_directory(event.dest_path)
    except Exception as xception:
        _add_scan_error(self.root_directory, xception)
        log.critical("scan_event", "Exception occurred - reconnecting to the database just in case.")
        db.close()
        db.connect()
def _html_write_error(self, status_code, **kwargs):
    if "exc_info" in kwargs:
        exc = kwargs['exc_info'][1]
        # Restart DB on a connection error if that's what we're handling
        if isinstance(exc, (psycopg2.OperationalError, psycopg2.InterfaceError)):
            try:
                db.close()
                db.connect()
                self.append("error", { "code": 500, "tl_key": "db_error_retry", "text": self.locale.translate("db_error_retry") })
            except Exception:
                self.append("error", { "code": 500, "tl_key": "db_error_permanent", "text": self.locale.translate("db_error_permanent") })
        elif isinstance(exc, APIException):
            if not isinstance(self.locale, locale.RainwaveLocale):
                exc.localize(locale.RainwaveLocale.get("en_CA"))
            else:
                exc.localize(self.locale)
        if (isinstance(exc, APIException) or isinstance(exc, tornado.web.HTTPError)) and exc.reason:
            self.write(self.render_string("basic_header.html", title="%s - %s" % (status_code, exc.reason)))
        else:
            self.write(self.render_string("basic_header.html", title="HTTP %s - %s" % (status_code, tornado.httputil.responses.get(status_code, 'Unknown'))))
        if status_code == 500 or config.get("developer_mode"):
            self.write("<p>")
            self.write(self.locale.translate("unknown_error_message"))
            self.write("</p><p>")
            self.write(self.locale.translate("debug_information"))
            self.write("</p><div class='json'>")
            for line in traceback.format_exception(kwargs['exc_info'][0], kwargs['exc_info'][1], kwargs['exc_info'][2]):
                self.write(line)
            self.write("</div>")
    self.finish()
def _listen(self, sid):
    pid = os.getpid()
    pid_file = open(
        "%s/backend_%s.pid" % (config.get_directory("pid_dir"), config.station_id_friendly[sid].lower()),
        "w",
    )
    pid_file.write(str(pid))
    pid_file.close()
    db.connect()
    cache.connect()
    zeromq.init_pub()
    log.init(
        "%s/rw_%s.log" % (config.get_directory("log_dir"), config.station_id_friendly[sid].lower()),
        config.get("log_level"),
    )
    memory_trace.setup(config.station_id_friendly[sid].lower())
    if config.test_mode:
        playlist.remove_all_locks(sid)
    # (r"/refresh/([0-9]+)", RefreshScheduleRequest)
    app = tornado.web.Application(
        [
            (r"/advance/([0-9]+)", AdvanceScheduleRequest),
        ],
        debug=(config.test_mode or config.get("developer_mode")),
    )
    port = int(config.get("backend_port")) + sid
    server = tornado.httpserver.HTTPServer(app)
    server.listen(port, address="127.0.0.1")
    for station_id in config.station_ids:
        playlist.prepare_cooldown_algorithm(station_id)
    schedule.load()
    log.debug(
        "start",
        "Backend server started, station %s port %s, ready to go." % (config.station_id_friendly[sid], port),
    )
    ioloop = tornado.ioloop.IOLoop.instance()
    try:
        ioloop.start()
    finally:
        ioloop.stop()
        server.stop()
        db.close()
        log.info("stop", "Backend has been shutdown.")
        log.close()
def get(self, sid): #pylint: disable=W0221
    self.success = False
    self.sid = None
    if int(sid) in config.station_ids:
        self.sid = int(sid)
    else:
        return
    if cache.get_station(self.sid, "backend_paused") and cache.get_station(self.sid, "backend_pause_extend"):
        self.write(self._get_pause_file())
        cache.set_station(self.sid, "backend_pause_extend", False)
        cache.set_station(self.sid, "backend_paused_playing", True)
        return
    else:
        cache.set_station(self.sid, "backend_pause_extend", False)
        cache.set_station(self.sid, "backend_paused", False)
        cache.set_station(self.sid, "backend_paused_playing", False)
    # This program must be run on 1 station for 1 instance, which would allow this operation to be safe.
    # Also works if 1 process is serving all stations. Pinging any instance for any station
    # would break the program here, though.
    if cache.get_station(self.sid, "get_next_socket_timeout") and sid_output[self.sid]:
        log.warn("backend", "Using previous output to prevent flooding.")
        self.write(sid_output[self.sid])
        sid_output[self.sid] = None
        self.success = True
    else:
        try:
            schedule.advance_station(self.sid)
        except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
            log.warn("backend", e.diag.message_primary)
            db.close()
            db.connect()
            raise
        except psycopg2.extensions.TransactionRollbackError as e:
            log.warn("backend", "Database transaction deadlock. Re-opening database and setting retry timeout.")
            db.close()
            db.connect()
            raise
        to_send = None
        if not config.get("liquidsoap_annotations"):
            to_send = schedule.get_advancing_file(self.sid)
        else:
            to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
        sid_output[self.sid] = to_send
        self.success = True
        if not cache.get_station(self.sid, "get_next_socket_timeout"):
            self.write(to_send)
def get(self, sid):
    self.success = False
    self.sid = None
    if int(sid) in config.station_ids:
        self.sid = int(sid)
    else:
        return
    if cache.get_station(self.sid, "backend_paused"):
        if not cache.get_station(self.sid, "dj_heartbeat_start"):
            log.debug("dj", "Setting server start heartbeat.")
            cache.set_station(self.sid, "dj_heartbeat_start", timestamp())
        self.write(self._get_pause_file())
        schedule.set_upnext_crossfade(self.sid, False)
        cache.set_station(self.sid, "backend_paused_playing", True)
        sync_to_front.sync_frontend_dj(self.sid)
        return
    else:
        cache.set_station(self.sid, "dj_heartbeat_start", False)
        cache.set_station(self.sid, "backend_paused", False)
        cache.set_station(self.sid, "backend_paused_playing", False)
    try:
        schedule.advance_station(self.sid)
    except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
        log.warn("backend", e.diag.message_primary)
        db.close()
        db.connect()
        raise
    except psycopg2.extensions.TransactionRollbackError as e:
        log.warn("backend", "Database transaction deadlock. Re-opening database and setting retry timeout.")
        db.close()
        db.connect()
        raise
    to_send = None
    if not config.get("liquidsoap_annotations"):
        to_send = schedule.get_advancing_file(self.sid)
    else:
        to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
    self.success = True
    if not cache.get_station(self.sid, "get_next_socket_timeout"):
        self.write(to_send)
def get(self, sid):
    self.success = False
    self.sid = None
    if int(sid) in config.station_ids:
        self.sid = int(sid)
    else:
        return
    # This program must be run on 1 station for 1 instance, which would allow this operation to be safe.
    # Also works if 1 process is serving all stations. Pinging any instance for any station
    # would break the program here, though.
    if cache.get_station(self.sid, "get_next_socket_timeout") and sid_output[self.sid]:
        log.warn("backend", "Using previous output to prevent flooding.")
        self.write(sid_output[self.sid])
        sid_output[self.sid] = None
        self.success = True
    else:
        try:
            schedule.advance_station(self.sid)
        except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
            log.warn("backend", e.diag.message_primary)
            db.close()
            db.connect()
            raise
        except psycopg2.extensions.TransactionRollbackError as e:
            log.warn("backend", "Database transaction deadlock. Re-opening database and setting retry timeout.")
            db.close()
            db.connect()
            raise
        to_send = None
        if not config.get("liquidsoap_annotations"):
            to_send = schedule.get_advancing_file(self.sid)
        else:
            to_send = self._get_annotated(schedule.get_advancing_event(self.sid))
        sid_output[self.sid] = to_send
        self.success = True
        if not cache.get_station(self.sid, "get_next_socket_timeout"):
            self.write(to_send)
def _listen(self, task_id):
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id
    # Log according to configured directory and port
    # we're operating on
    log_file = "%s/rw_api_%s.log" % (config.get("api_log_dir"), port_no)
    if config.test_mode and os.path.exists(log_file):
        os.remove(log_file)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.open()
    cache.open()
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)
    # Fire ze missiles!
    app = tornado.web.Application(
        request_classes,
        debug=config.get("debug_mode"),
        template_path=os.path.join(os.path.dirname(__file__), "../templates"))
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(port_no)
    if config.get("api_user") and config.get("api_group"):
        chuser.change_user(config.get("api_user"), config.get("api_group"))
    for request in request_classes:
        log.debug("start", " Handler: %s" % str(request))
    log.info("start", "Server bootstrapped and ready to go.")
    self.ioloop = tornado.ioloop.IOLoop.instance()
    try:
        self.ioloop.start()
    finally:
        self.ioloop.stop()
        http_server.stop()
        db.close()
        log.info("stop", "Server has been shutdown.")
        log.close()
def stop(self, *args, **kwargs):
    super(RWObserver, self).stop(*args, **kwargs)
    db.close()
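# Illustrative usage sketch (not part of the original source), assuming RWObserver
# subclasses watchdog.observers.Observer: stopping the observer also closes the
# per-process database connection via the override above. The event handler class
# and the monitored path are hypothetical.
import time
from watchdog.events import FileSystemEventHandler

observer = RWObserver()
observer.schedule(FileSystemEventHandler(), "/tmp/music", recursive=True)
observer.start()
try:
    time.sleep(5)
finally:
    observer.stop()  # runs the override above, which calls db.close()
    observer.join()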
def _listen(self, task_id):
    zeromq.init_pub()
    zeromq.init_sub()
    import api_requests.sync
    api_requests.sync.init()
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id
    pid = os.getpid()
    pid_file = open("%s/api_%s.pid" % (config.get_directory("pid_dir"), port_no), 'w')
    pid_file.write(str(pid))
    pid_file.close()
    # Log according to configured directory and port
    # we're operating on
    log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"), port_no)
    if config.test_mode and os.path.exists(log_file):
        os.remove(log_file)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.connect()
    cache.connect()
    memory_trace.setup(port_no)
    api.locale.load_translations()
    api.locale.compile_static_language_files()
    if config.get("web_developer_mode"):
        for station_id in config.station_ids:
            playlist.prepare_cooldown_algorithm(station_id)
        # automatically loads every station ID and fills things in if there's no data
        schedule.load()
        for station_id in config.station_ids:
            schedule.update_memcache(station_id)
            rainwave.request.update_line(station_id)
            rainwave.request.update_expire_times()
            cache.set_station(station_id, "backend_ok", True)
            cache.set_station(station_id, "backend_message", "OK")
            cache.set_station(station_id, "get_next_socket_timeout", False)
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)
        playlist.prepare_cooldown_algorithm(sid)
    playlist.update_num_songs()
    # If we're not in developer, remove development-related URLs
    if not config.get("developer_mode"):
        i = 0
        while i < len(request_classes):
            if request_classes[i][0].find("/test/") != -1:
                request_classes.pop(i)
                i = i - 1
            i = i + 1
    # Make sure all other errors get handled in an API-friendly way
    request_classes.append((r"/api/.*", api.web.Error404Handler))
    request_classes.append((r"/api4/.*", api.web.Error404Handler))
    request_classes.append((r".*", api.web.HTMLError404Handler))
    # Initialize the help (rather than it scan all URL handlers every time someone hits it)
    api.help.sectionize_requests()
    # Fire ze missiles!
    global app
    app = tornado.web.Application(
        request_classes,
        debug=(config.test_mode or config.get("developer_mode")),
        template_path=os.path.join(os.path.dirname(__file__), "../templates"),
        static_path=os.path.join(os.path.dirname(__file__), "../static"),
        autoescape=None)
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(port_no)
    if config.get("api_user") and config.get("api_group"):
        chuser.change_user(config.get("api_user"), config.get("api_group"))
    for request in request_classes:
        log.debug("start", " Handler: %s" % str(request))
    log.info("start", "API server on port %s ready to go." % port_no)
    self.ioloop = tornado.ioloop.IOLoop.instance()
    try:
        self.ioloop.start()
    finally:
        self.ioloop.stop()
        http_server.stop()
        db.close()
        log.info("stop", "Server has been shutdown.")
        log.close()
def write_error(self, status_code, **kwargs):
    if self._output_array:
        self._output = []
    else:
        if self._output and "message_id" in self._output:
            self._output = {
                "message_id": self._output["message_id"],
            }
            self._output[self.return_name] = {
                "tl_key": "internal_error",
                "text": self.locale.translate("internal_error"),
                "status": 500,
                "success": False,
            }
        else:
            self._output = {}
    if "exc_info" in kwargs:
        exc = kwargs["exc_info"][1]
        # Restart DB on a connection error if that's what we're handling
        if isinstance(exc, (psycopg2.OperationalError, psycopg2.InterfaceError)):
            try:
                db.close()
                db.connect()
                self.append("error", {
                    "code": 500,
                    "tl_key": "db_error_retry",
                    "text": self.locale.translate("db_error_retry"),
                })
            except Exception:
                self.append("error", {
                    "code": 500,
                    "tl_key": "db_error_permanent",
                    "text": self.locale.translate("db_error_permanent"),
                })
        elif isinstance(exc, APIException):
            exc.localize(self.locale)
            self.append(self.return_name, exc.jsonable())
        elif isinstance(exc, SongNonExistent):
            self.append("error", {
                "code": status_code,
                "tl_key": "song_does_not_exist",
                "text": self.locale.translate("song_does_not_exist"),
            })
        else:
            self.append("error", {
                "code": status_code,
                "tl_key": "internal_error",
                "text": repr(exc),
            })
            self.append("traceback", {
                "traceback": traceback.format_exception(
                    kwargs["exc_info"][0],
                    kwargs["exc_info"][1],
                    kwargs["exc_info"][2],
                )
            })
    else:
        self.append("error", {
            "tl_key": "internal_error",
            "text": self.locale.translate("internal_error"),
        })
    if not kwargs.get("no_finish"):
        self.finish()
def _listen(self, task_id):
    import api_requests.sync
    api_requests.sync.init()
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id
    pid = os.getpid()
    pid_file = open("%s/api_%s.pid" % (config.get_directory("pid_dir"), port_no), 'w')
    pid_file.write(str(pid))
    pid_file.close()
    # Log according to configured directory and port
    # we're operating on
    log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"), port_no)
    if config.test_mode and os.path.exists(log_file):
        os.remove(log_file)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.connect()
    cache.connect()
    memory_trace.setup(port_no)
    if config.get("web_developer_mode"):
        for station_id in config.station_ids:
            playlist.prepare_cooldown_algorithm(station_id)
        # automatically loads every station ID and fills things in if there's no data
        schedule.load()
        for station_id in config.station_ids:
            schedule.update_memcache(station_id)
            rainwave.request.update_line(station_id)
            rainwave.request.update_expire_times()
            cache.set_station(station_id, "backend_ok", True)
            cache.set_station(station_id, "backend_message", "OK")
            cache.set_station(station_id, "get_next_socket_timeout", False)
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)
        playlist.prepare_cooldown_algorithm(sid)
    playlist.update_num_songs()
    # If we're not in developer, remove development-related URLs
    if not config.get("developer_mode"):
        i = 0
        while i < len(request_classes):
            if request_classes[i][0].find("/test/") != -1:
                request_classes.pop(i)
                i = i - 1
            i = i + 1
    # Make sure all other errors get handled in an API-friendly way
    request_classes.append((r"/api/.*", api.web.Error404Handler))
    request_classes.append((r"/api4/.*", api.web.Error404Handler))
    request_classes.append((r".*", api.web.HTMLError404Handler))
    # Initialize the help (rather than it scan all URL handlers every time someone hits it)
    api.help.sectionize_requests()
    # Fire ze missiles!
    app = tornado.web.Application(
        request_classes,
        debug=(config.test_mode or config.get("developer_mode")),
        template_path=os.path.join(os.path.dirname(__file__), "../templates"),
        static_path=os.path.join(os.path.dirname(__file__), "../static"),
        autoescape=None)
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(port_no)
    if config.get("api_user") and config.get("api_group"):
        chuser.change_user(config.get("api_user"), config.get("api_group"))
    if task_id == 0:
        buildtools.bake_css()
        buildtools.bake_js()
        buildtools.bake_beta_js()
    for request in request_classes:
        log.debug("start", " Handler: %s" % str(request))
    log.info("start", "API server on port %s ready to go." % port_no)
    self.ioloop = tornado.ioloop.IOLoop.instance()
    try:
        self.ioloop.start()
    finally:
        self.ioloop.stop()
        http_server.stop()
        db.close()
        log.info("stop", "Server has been shutdown.")
        log.close()
def _listen(self, task_id):
    zeromq.init_pub()
    zeromq.init_sub()
    import api_requests.sync
    api_requests.sync.init()
    # task_ids start at zero, so we gobble up ports starting at the base port and work up
    port_no = int(config.get("api_base_port")) + task_id
    # Log according to configured directory and port
    # we're operating on
    log_file = "%s/rw_api_%s.log" % (config.get_directory("log_dir"), port_no)
    log.init(log_file, config.get("log_level"))
    log.debug("start", "Server booting, port %s." % port_no)
    db.connect(auto_retry=False, retry_only_this_time=True)
    cache.connect()
    memory_trace.setup(port_no)
    api.locale.load_translations()
    api.locale.compile_static_language_files()
    if config.get("developer_mode"):
        for station_id in config.station_ids:
            playlist.prepare_cooldown_algorithm(station_id)
        # automatically loads every station ID and fills things in if there's no data
        schedule.load()
        for station_id in config.station_ids:
            schedule.update_memcache(station_id)
            rainwave.request.update_line(station_id)
            rainwave.request.update_expire_times()
            cache.set_station(station_id, "backend_ok", True)
            cache.set_station(station_id, "backend_message", "OK")
            cache.set_station(station_id, "get_next_socket_timeout", False)
    for sid in config.station_ids:
        cache.update_local_cache_for_sid(sid)
        playlist.prepare_cooldown_algorithm(sid)
    playlist.update_num_songs()
    # If we're not in developer, remove development-related URLs
    if not config.get("developer_mode"):
        i = 0
        while i < len(request_classes):
            if request_classes[i][0].find("/test/") != -1:
                request_classes.pop(i)
                i = i - 1
            i = i + 1
    # Make sure all other errors get handled in an API-friendly way
    request_classes.append((r"/api/.*", api.web.Error404Handler))
    request_classes.append((r"/api4/.*", api.web.Error404Handler))
    request_classes.append((r".*", api.web.HTMLError404Handler))
    # Initialize the help (rather than it scan all URL handlers every time someone hits it)
    api.help.sectionize_requests()
    # Fire ze missiles!
    global app
    debug = config.get("developer_mode")
    app = tornado.web.Application(
        request_classes,
        debug=debug,
        template_path=os.path.join(os.path.dirname(__file__), "../templates"),
        static_path=os.path.join(os.path.dirname(__file__), "../static"),
        autoescape=None,
        autoreload=debug,
        serve_traceback=debug,
    )
    http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
    http_server.listen(port_no)
    for request in request_classes:
        log.debug("start", " Handler: %s" % str(request))
    log.info("start", "API server on port %s ready to go." % port_no)
    self.ioloop = tornado.ioloop.IOLoop.instance()
    db_keepalive = tornado.ioloop.PeriodicCallback(db.connection_keepalive, 10000)
    db_keepalive.start()
    try:
        self.ioloop.start()
    finally:
        self.ioloop.stop()
        http_server.stop()
        db.close()
        log.info("stop", "Server has been shutdown.")
        log.close()