class Core(object):
    def __init__(self):
        self.started = False
        self.loading_shows = False
        self.daemon = None
        self.pid = os.getpid()

        self.gui_static_dir = os.path.join(sickrage.PROG_DIR, 'core', 'webserver', 'static')
        self.gui_views_dir = os.path.join(sickrage.PROG_DIR, 'core', 'webserver', 'views')
        self.gui_app_dir = os.path.join(sickrage.PROG_DIR, 'core', 'webserver', 'app')

        self.https_cert_file = None
        self.https_key_file = None

        self.trakt_api_key = '5c65f55e11d48c35385d9e8670615763a605fad28374c8ae553a7b7a50651ddd'
        self.trakt_api_secret = 'b53e32045ac122a445ef163e6d859403301ffe9b17fb8321d428531b69022a82'
        self.trakt_app_id = '4562'

        self.fanart_api_key = '9b3afaf26f6241bdb57d6cc6bd798da7'

        self.git_remote = "origin"
        self.git_remote_url = "https://git.sickrage.ca/SiCKRAGE/sickrage"

        self.unrar_tool = rarfile.UNRAR_TOOL

        self.naming_force_folders = False

        self.min_auto_postprocessor_freq = 1
        self.min_daily_searcher_freq = 10
        self.min_backlog_searcher_freq = 10
        self.min_version_updater_freq = 1
        self.min_subtitle_searcher_freq = 1
        self.min_failed_snatch_age = 1

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.shows = {}
        self.shows_recent = deque(maxlen=5)

        self.main_db = None
        self.cache_db = None

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.disable_updates = None
        self.web_port = None
        self.web_host = None
        self.web_root = None
        self.developer = None
        self.db_type = None
        self.db_prefix = None
        self.db_host = None
        self.db_port = None
        self.db_username = None
        self.db_password = None
        self.debug = None
        self.latest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02")
        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.notification_providers = {}
        self.metadata_providers = {}
        self.search_providers = {}
        self.series_providers = {}

        self.adba_connection = None
        self.log = None
        self.config = None
        self.alerts = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.failed_snatch_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.auth_server = None
        self.announcements = None
        self.api = None
        self.amqp_client = None

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init sentry
        self.init_sentry()

        # scheduler
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})

        # init core classes
        self.api = API()
        self.config = Config(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.notification_providers = NotificationProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.series_providers = SeriesProviders()
        self.log = Logger()
        self.alerts = Notifications()
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.announcements = Announcements()
        self.amqp_client = AMQPClient()

        # authorization sso client
        self.auth_server = AuthServer()

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning("Shutting down as SiCKRAGE needs some space to work. You'll get corrupted data otherwise. Only %sMB left", available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            self.log.info('Performing restore of backup files')
            success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                # remove restore files
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'.format('sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # setup databases
        self.main_db.setup()
        self.config.db.setup()
        self.cache_db.setup()

        # load config
        self.config.load()

        # migrate config
        self.config.migrate_config_file(self.config_file)

        # add server id tag to sentry
        sentry_sdk.set_tag('server_id', self.config.general.server_id)

        # add user to sentry
        sentry_sdk.set_user({'id': self.config.user.sub_id, 'username': self.config.user.username, 'email': self.config.user.email})

        # config overrides
        if self.web_port:
            self.config.general.web_port = self.web_port
        if self.web_root:
            self.config.general.web_root = self.web_root

        # set language
        change_gui_lang(self.config.gui.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.general.socket_timeout)

        # set ssl cert/key filenames
        self.https_cert_file = os.path.abspath(os.path.join(self.data_dir, 'server.crt'))
        self.https_key_file = os.path.abspath(os.path.join(self.data_dir, 'server.key'))

        # setup logger settings
        self.log.logSize = self.config.general.log_size
        self.log.logNr = self.config.general.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.debug or self.config.general.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.general.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        if self.config.general.default_page not in DefaultHomePage:
            self.config.general.default_page = DefaultHomePage.HOME

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.general.anon_redirect.endswith('?'):
            self.config.general.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.general.root_dirs):
            self.config.general.root_dirs = ''

        self.naming_force_folders = check_force_season_folders()

        if self.config.general.nzb_method not in NzbMethod:
            self.config.general.nzb_method = NzbMethod.BLACKHOLE

        if self.config.general.torrent_method not in TorrentMethod:
            self.config.general.torrent_method = TorrentMethod.BLACKHOLE

        if self.config.general.auto_postprocessor_freq < self.min_auto_postprocessor_freq:
            self.config.general.auto_postprocessor_freq = self.min_auto_postprocessor_freq

        if self.config.general.daily_searcher_freq < self.min_daily_searcher_freq:
            self.config.general.daily_searcher_freq = self.min_daily_searcher_freq

        if self.config.general.backlog_searcher_freq < self.min_backlog_searcher_freq:
            self.config.general.backlog_searcher_freq = self.min_backlog_searcher_freq

        if self.config.general.version_updater_freq < self.min_version_updater_freq:
            self.config.general.version_updater_freq = self.min_version_updater_freq

        if self.config.general.subtitle_searcher_freq < self.min_subtitle_searcher_freq:
            self.config.general.subtitle_searcher_freq = self.min_subtitle_searcher_freq

        if self.config.failed_snatches.age < self.min_failed_snatch_age:
            self.config.failed_snatches.age = self.min_failed_snatch_age

        if self.config.general.proper_searcher_interval not in CheckPropersInterval:
            self.config.general.proper_searcher_interval = CheckPropersInterval.DAILY

        if self.config.general.show_update_hour < 0 or self.config.general.show_update_hour > 23:
            self.config.general.show_update_hour = 0

        # add app updater job
        self.scheduler.add_job(self.version_updater.task,
                               IntervalTrigger(hours=1, start_date=datetime.datetime.now() + datetime.timedelta(minutes=4), timezone='utc'),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add show updater job
        self.scheduler.add_job(self.show_updater.task,
                               IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.general.show_update_hour), timezone='utc'),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add rss cache updater job
        self.scheduler.add_job(self.rsscache_updater.task,
                               IntervalTrigger(minutes=15, timezone='utc'),
                               name=self.rsscache_updater.name,
                               id=self.rsscache_updater.name)

        # add daily search job
        self.scheduler.add_job(self.daily_searcher.task,
                               IntervalTrigger(minutes=self.config.general.daily_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=4), timezone='utc'),
                               name=self.daily_searcher.name,
                               id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(self.failed_snatch_searcher.task,
                               IntervalTrigger(hours=1, start_date=datetime.datetime.now() + datetime.timedelta(minutes=4), timezone='utc'),
                               name=self.failed_snatch_searcher.name,
                               id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(self.backlog_searcher.task,
                               IntervalTrigger(minutes=self.config.general.backlog_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=30), timezone='utc'),
                               name=self.backlog_searcher.name,
                               id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(self.auto_postprocessor.task,
                               IntervalTrigger(minutes=self.config.general.auto_postprocessor_freq, timezone='utc'),
                               name=self.auto_postprocessor.name,
                               id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(self.proper_searcher.task,
                               IntervalTrigger(minutes=self.config.general.proper_searcher_interval.value, timezone='utc'),
                               name=self.proper_searcher.name,
                               id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.task,
                               IntervalTrigger(hours=1, timezone='utc'),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(self.subtitle_searcher.task,
                               IntervalTrigger(hours=self.config.general.subtitle_searcher_freq, timezone='utc'),
                               name=self.subtitle_searcher.name,
                               id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(self.upnp_client.task,
                               IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime, timezone='utc'),
                               name=self.upnp_client.name,
                               id=self.upnp_client.name)

        # start queues
        self.search_queue.start_worker(self.config.general.max_queue_workers)
        self.show_queue.start_worker(self.config.general.max_queue_workers)
        self.postprocessor_queue.start_worker(self.config.general.max_queue_workers)

        # start web server
        self.wserver.start()

        # start scheduler service
        self.scheduler.start()

        # perform server checkup
        IOLoop.current().add_callback(self.server_checkup)

        # load shows
        IOLoop.current().add_callback(self.load_shows)

        # load network timezones
        IOLoop.current().spawn_callback(self.tz_updater.update_network_timezones)

        # load search provider urls
        IOLoop.current().spawn_callback(self.search_providers.update_urls)

        # startup message
        IOLoop.current().add_callback(self.startup_message)

        # launch browser
        IOLoop.current().add_callback(self.launch_browser)

        # perform server checkups every hour
        PeriodicCallback(self.server_checkup, 1 * 60 * 60 * 1000).start()

        # perform shutdown trigger check every 5 seconds
        PeriodicCallback(self.shutdown_trigger, 5 * 1000).start()

        # start ioloop
        IOLoop.current().start()

    def init_sentry(self):
        # sentry log handler
        sentry_logging = LoggingIntegration(
            level=logging.INFO,  # capture info and above as breadcrumbs
            event_level=logging.ERROR  # send errors as events
        )

        # init sentry logging
        sentry_sdk.init(dsn="https://[email protected]/2?verify_ssl=0",
                        integrations=[sentry_logging],
                        release=sickrage.version(),
                        environment=('master', 'develop')['dev' in sickrage.version()],
                        ignore_errors=['KeyboardInterrupt', 'PermissionError', 'FileNotFoundError', 'EpisodeNotFoundException'])

        # sentry tags
        sentry_tags = {
            'platform': platform.platform(),
            'locale': repr(locale.getdefaultlocale()),
            'python': platform.python_version(),
            'install_type': sickrage.install_type()
        }

        # set sentry tags
        for tag_key, tag_value in sentry_tags.items():
            sentry_sdk.set_tag(tag_key, tag_value)

        # set loggers to ignore
        ignored_loggers = [
            'enzyme.parsers.ebml.core',
            'subliminal.core',
            'subliminal.utils',
            'subliminal.refiners.metadata',
            'subliminal.providers.tvsubtitles',
            'pika.connection',
            'pika.adapters.base_connection',
            'pika.adapters.utils.io_services_utils',
            'pika.adapters.utils.connection_workflow',
            'pika.adapters.utils.selector_ioloop_adapter'
        ]

        for item in ignored_loggers:
            ignore_logger(item)

    def server_checkup(self):
        if self.config.general.server_id:
            server_status = self.api.server.get_status(self.config.general.server_id)
            if server_status and not server_status['registered']:
                # re-register server
                server_id = self.api.server.register_server(
                    ip_addresses=','.join([get_internal_ip()]),
                    web_protocol=('http', 'https')[self.config.general.enable_https],
                    web_port=self.config.general.web_port,
                    web_root=self.config.general.web_root,
                    server_version=sickrage.version(),
                )

                if server_id:
                    self.log.info('Re-registered SiCKRAGE server with SiCKRAGE API')
                    sentry_sdk.set_tag('server_id', self.config.general.server_id)
                    self.config.general.server_id = server_id
                    self.config.save(mark_dirty=True)
            else:
                self.log.debug('Updating SiCKRAGE server data on SiCKRAGE API')

                # update server information
                self.api.server.update_server(
                    server_id=self.config.general.server_id,
                    ip_addresses=','.join([get_internal_ip()]),
                    web_protocol=('http', 'https')[self.config.general.enable_https],
                    web_port=self.config.general.web_port,
                    web_root=self.config.general.web_root,
                    server_version=sickrage.version(),
                )

    def load_shows(self):
        threading.currentThread().setName('CORE')

        session = self.main_db.session()

        self.log.info('Loading initial shows list')

        self.loading_shows = True
        self.shows = {}

        for query in session.query(MainDB.TVShow).with_entities(MainDB.TVShow.series_id, MainDB.TVShow.series_provider_id,
                                                                MainDB.TVShow.name, MainDB.TVShow.location):
            try:
                # if not os.path.isdir(query.location) and self.config.general.create_missing_show_dirs:
                #     make_dir(query.location)
                self.log.info('Loading show {}'.format(query.name))
                self.shows.update({(query.series_id, query.series_provider_id): TVShow(query.series_id, query.series_provider_id)})
            except Exception as e:
                self.log.debug('There was an error loading show: {}: {}'.format(query.name, e))

        self.loading_shows = False
        self.log.info('Loading initial shows list finished')

    def startup_message(self):
        self.log.info("SiCKRAGE :: STARTED")
        self.log.info(f"SiCKRAGE :: APP VERSION:[{sickrage.version()}]")
        self.log.info(f"SiCKRAGE :: CONFIG VERSION:[v{self.config.db.version}]")
        self.log.info(f"SiCKRAGE :: DATABASE VERSION:[v{self.main_db.version}]")
        self.log.info(f"SiCKRAGE :: DATABASE TYPE:[{self.db_type}]")
        self.log.info(f"SiCKRAGE :: INSTALL TYPE:[{self.version_updater.updater.type}]")
        self.log.info(
            f"SiCKRAGE :: URL:[{('http', 'https')[self.config.general.enable_https]}://"
            f"{(get_internal_ip(), self.web_host)[self.web_host not in ['', '0.0.0.0']]}:"
            f"{self.config.general.web_port}/{self.config.general.web_root.lstrip('/')}]"
        )

    def launch_browser(self):
        if not self.no_launch and self.config.general.launch_browser:
            launch_browser(protocol=('http', 'https')[self.config.general.enable_https],
                           host=(get_internal_ip(), self.web_host)[self.web_host != ''],
                           startport=self.config.general.web_port)

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS {}!!!'.format(('SHUTTING DOWN', 'RESTARTING')[restart]))

            # shutdown scheduler
            if self.scheduler:
                try:
                    self.scheduler.shutdown()
                except (SchedulerNotRunningError, RuntimeError):
                    pass

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # stop queues
            self.search_queue.shutdown()
            self.show_queue.shutdown()
            self.postprocessor_queue.shutdown()

            # stop amqp consumer
            self.amqp_client.stop()

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save shows
            self.log.info('Saving all shows to the database')
            for show in self.shows.values():
                show.save()

            # save settings
            self.config.save()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if self.daemon:
            self.daemon.stop()

        self.started = False

    def restart(self):
        self.shutdown(restart=True)

    def shutdown_trigger(self):
        if not self.started:
            IOLoop.current().stop()
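
# A minimal, self-contained sketch of the scheduler pattern Core.start() above
# relies on: APScheduler's TornadoScheduler firing IntervalTrigger jobs on
# Tornado's IOLoop. The job body and names here are illustrative assumptions,
# not part of SiCKRAGE itself.
def _example_scheduler_pattern():
    from apscheduler.schedulers.tornado import TornadoScheduler
    from apscheduler.triggers.interval import IntervalTrigger
    from tornado.ioloop import IOLoop

    def heartbeat():
        # stands in for a real task such as version_updater.task
        print('tick')

    scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
    scheduler.add_job(heartbeat, IntervalTrigger(seconds=5, timezone='utc'),
                      name='HEARTBEAT', id='HEARTBEAT')
    scheduler.start()
    IOLoop.current().start()  # blocks, exactly like the end of Core.start()
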
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop()
        self.pid = os.getpid()
        self.showlist = []

        self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                                 os.path.join(self.data_dir, '{}.bak-{}'.format('sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                             os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error("Shutting down as SiCKRAGE needs some space to work. You'll get corrupted data otherwise. Only %sMB left", available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

        self.config.min_backlog_searcher_freq = get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq

        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age

        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'

        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(hours=self.config.version_updater_freq),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add daily search job
        self.scheduler.add_job(self.daily_searcher.run,
                               IntervalTrigger(minutes=self.config.daily_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
                               name=self.daily_searcher.name,
                               id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(self.failed_snatch_searcher.run,
                               IntervalTrigger(hours=1, start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
                               name=self.failed_snatch_searcher.name,
                               id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(self.backlog_searcher.run,
                               IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
                               name=self.backlog_searcher.name,
                               id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(self.auto_postprocessor.run,
                               IntervalTrigger(minutes=self.config.autopostprocessor_freq),
                               name=self.auto_postprocessor.name,
                               id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(self.proper_searcher.run,
                               IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[self.config.proper_searcher_interval]),
                               name=self.proper_searcher.name,
                               id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(self.subtitle_searcher.run,
                               IntervalTrigger(hours=self.config.subtitle_searcher_freq),
                               name=self.subtitle_searcher.name,
                               id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(self.upnp_client.run,
                               IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
                               name=self.upnp_client.name,
                               id=self.upnp_client.name)

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """Populates the showlist and quicksearch cache with shows and episodes from the database"""
        self.quicksearch_cache.load()

        for dbData in self.main_db.all('tv_shows'):
            try:
                self.log.debug("Loading data for show: [{}]".format(dbData['show_name']))
                self.showlist.append(TVShow(int(dbData['indexer']), int(dbData['indexer_id'])))
                self.quicksearch_cache.add_show(dbData['indexer_id'])
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" % (dbData['location'], str(e)))
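
# A short illustration of how the '%'-style naming patterns above expand: each
# naming_ep_type entry is an old-style format string keyed on 'seasonnumber'
# and 'episodenumber'. The function name is illustrative, not SiCKRAGE's.
def _example_naming_pattern_expansion():
    pattern = "S%(seasonnumber)02dE%(episodenumber)02d"  # third naming_ep_type entry
    result = pattern % {'seasonnumber': 1, 'episodenumber': 2}
    assert result == 'S01E02'  # matches the 'S01E02' entry of naming_ep_type_text
    return result
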
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = None
        self.pid = os.getpid()

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.private_key = None
        self.public_key = None

        self.main_db = None
        self.cache_db = None

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.disable_updates = None
        self.web_port = None
        self.developer = None
        self.db_type = None
        self.db_prefix = None
        self.db_host = None
        self.db_port = None
        self.db_username = None
        self.db_password = None
        self.debug = None
        self.newest_version_string = None

        self.oidc_client_id = 'sickrage-app'
        self.oidc_client_secret = '5d4710b2-ca70-4d39-b5a3-0705e2c5e703'

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
                                     1: [" - " + x for x in self.naming_ep_type],
                                     2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.notifier_providers = {}
        self.metadata_providers = {}
        self.search_providers = {}

        self.adba_connection = None
        self.log = None
        self.config = None
        self.alerts = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.failed_snatch_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id=self.oidc_client_id,
                                                 client_secret=self.oidc_client_secret)

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'.format('sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # init encryption public and private keys
        encryption.initialize()

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning("Shutting down as SiCKRAGE needs some space to work. You'll get corrupted data otherwise. Only %sMB left", available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # perform integrity check
            db.integrity_check()

            # migrate database
            db.migrate()

            # sync database repo
            db.sync_db_repo()

            # cleanup
            db.cleanup()

        # load name cache
        self.name_cache.load()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq

        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age

        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'

        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add API token refresh job
        self.scheduler.add_job(API().refresh_token,
                               IntervalTrigger(hours=1),
                               name='SR-API',
                               id='SR-API')

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(hours=self.config.version_updater_freq),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(self.tz_updater.run,
                               IntervalTrigger(days=1),
                               name=self.tz_updater.name,
                               id=self.tz_updater.name)

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add rss cache updater job
        self.scheduler.add_job(self.rsscache_updater.run,
                               IntervalTrigger(minutes=15),
                               name=self.rsscache_updater.name,
                               id=self.rsscache_updater.name)

        # add daily search job
        self.scheduler.add_job(self.daily_searcher.run,
                               IntervalTrigger(minutes=self.config.daily_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
                               name=self.daily_searcher.name,
                               id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(self.failed_snatch_searcher.run,
                               IntervalTrigger(hours=1, start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
                               name=self.failed_snatch_searcher.name,
                               id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(self.backlog_searcher.run,
                               IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
                               name=self.backlog_searcher.name,
                               id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(self.auto_postprocessor.run,
                               IntervalTrigger(minutes=self.config.autopostprocessor_freq),
                               name=self.auto_postprocessor.name,
                               id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(self.proper_searcher.run,
                               IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[self.config.proper_searcher_interval]),
                               name=self.proper_searcher.name,
                               id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(self.subtitle_searcher.run,
                               IntervalTrigger(hours=self.config.subtitle_searcher_freq),
                               name=self.subtitle_searcher.name,
                               id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(self.upnp_client.run,
                               IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
                               name=self.upnp_client.name,
                               id=self.upnp_client.name)

        # add namecache update job
        self.scheduler.add_job(self.name_cache.build_all,
                               IntervalTrigger(days=1),
                               name=self.name_cache.name,
                               id=self.name_cache.name)

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.io_loop.add_callback(self.search_queue.watch)
        self.io_loop.add_callback(self.show_queue.watch)
        self.io_loop.add_callback(self.postprocessor_queue.watch)

        # fire off startup events
        self.io_loop.run_in_executor(None, self.quicksearch_cache.run)
        self.io_loop.run_in_executor(None, self.name_cache.run)
        self.io_loop.run_in_executor(None, self.version_updater.run)
        self.io_loop.run_in_executor(None, self.tz_updater.run)

        # start web server
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.io_loop.run_in_executor(None, functools.partial(launch_browser,
                                                                 ('http', 'https')[sickrage.app.config.enable_https],
                                                                 sickrage.app.config.web_host,
                                                                 sickrage.app.config.web_port))

        def started():
            self.log.info("SiCKRAGE :: STARTED")
            self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(sickrage.version()))
            self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(self.config.config_version))
            self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(self.main_db.version))
            self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(self.db_type))
            self.log.info("SiCKRAGE :: URL:[{}://{}:{}{}]".format(('http', 'https')[self.config.enable_https],
                                                                  self.config.web_host, self.config.web_port,
                                                                  self.config.web_root))

        # start io_loop
        self.io_loop.add_callback(started)
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save settings
            self.config.save()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        if self.io_loop:
            self.io_loop.stop()
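
# The ('http', 'https')[flag] and ('FAILED', 'SUCCESSFUL')[success]
# constructions used throughout these classes are plain tuple indexing: False
# selects index 0 and True selects index 1. A standalone sketch with
# illustrative names:
def _example_bool_tuple_indexing(enable_https):
    protocol = ('http', 'https')[bool(enable_https)]  # 'https' when truthy, else 'http'
    return protocol
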
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop().instance()
        self.pid = os.getpid()
        self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.sys_encoding = get_sys_encoding()
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.showlist = []

        self.api = None
        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = None
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.failed_db = None
        self.scheduler = None
        self.wserver = None
        self.wsession = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None

        # patch modules with encoding kludge
        patch_modules()

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = BackgroundScheduler()
        self.wserver = WebServer()
        self.wsession = WebSession()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                                 os.path.join(self.data_dir, '{}.bak-{}'.format('sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                             os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error("Shutting down as SiCKRAGE needs some space to work. You'll get corrupted data otherwise. Only %sMB left", available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq

        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'

        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(hours=self.config.version_updater_freq),
                               name="VERSIONUPDATER",
                               id="VERSIONUPDATER")

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)),
                               name="SHOWUPDATER",
                               id="SHOWUPDATER")

        # add daily search job
        self.scheduler.add_job(self.daily_searcher.run,
                               IntervalTrigger(minutes=self.config.daily_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
                               name="DAILYSEARCHER",
                               id="DAILYSEARCHER")

        # add backlog search job
        self.scheduler.add_job(self.backlog_searcher.run,
                               IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                                               start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
                               name="BACKLOG",
                               id="BACKLOG")

        # add auto-postprocessing job
        self.scheduler.add_job(self.auto_postprocessor.run,
                               IntervalTrigger(minutes=self.config.autopostprocessor_freq),
                               name="POSTPROCESSOR",
                               id="POSTPROCESSOR")

        # add find proper job
        self.scheduler.add_job(self.proper_searcher.run,
                               IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[self.config.proper_searcher_interval]),
                               name="PROPERSEARCHER",
                               id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name="TRAKTSEARCHER",
                               id="TRAKTSEARCHER")

        # add subtitles finder job
        self.scheduler.add_job(self.subtitle_searcher.run,
                               IntervalTrigger(hours=self.config.subtitle_searcher_freq),
                               name="SUBTITLESEARCHER",
                               id="SUBTITLESEARCHER")

        # start scheduler service
        self.scheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.scheduler.get_job('PROPERSEARCHER').pause,
         self.scheduler.get_job('PROPERSEARCHER').resume)[self.config.download_propers]()

        # Pause/Resume TRAKTSEARCHER job
        (self.scheduler.get_job('TRAKTSEARCHER').pause,
         self.scheduler.get_job('TRAKTSEARCHER').resume)[self.config.use_trakt]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.scheduler.get_job('SUBTITLESEARCHER').pause,
         self.scheduler.get_job('SUBTITLESEARCHER').resume)[self.config.use_subtitles]()

        # Pause/Resume POSTPROCESS job
        (self.scheduler.get_job('POSTPROCESSOR').pause,
         self.scheduler.get_job('POSTPROCESSOR').resume)[self.config.process_automatically]()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db, self.failed_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """Populates the showlist with shows from the database"""
        for dbData in [x['doc'] for x in self.main_db.db.all('tv_shows', with_doc=True)]:
            try:
                self.log.debug("Loading data for show: [{}]".format(dbData['show_name']))
                show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))
                show.nextEpisode()
                self.showlist += [show]
            except Exception as e:
                self.log.error("Show error in [%s]: %s" % (dbData['location'], str(e)))
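
# The Pause/Resume blocks above apply the same tuple-indexing idiom to a pair
# of bound methods: the boolean config flag selects one method, which is then
# called. A standalone sketch (scheduler, job_id, and enabled are illustrative
# parameters, not SiCKRAGE names):
def _example_toggle_job(scheduler, job_id, enabled):
    job = scheduler.get_job(job_id)
    (job.pause, job.resume)[bool(enabled)]()  # pause when False, resume when True
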
class Core(object):
    def __init__(self):
        self.started = False
        self.loading_shows = False
        self.daemon = None
        self.pid = os.getpid()

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.shows = {}
        self.shows_recent = deque(maxlen=5)

        self.private_key = None
        self.public_key = None

        self.main_db = None
        self.cache_db = None

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.disable_updates = None
        self.web_port = None
        self.web_host = None
        self.web_root = None
        self.developer = None
        self.db_type = None
        self.db_prefix = None
        self.db_host = None
        self.db_port = None
        self.db_username = None
        self.db_password = None
        self.debug = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
                                     1: [" - " + x for x in self.naming_ep_type],
                                     2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.notifier_providers = {}
        self.metadata_providers = {}
        self.search_providers = {}

        self.adba_connection = None
        self.log = None
        self.config = None
        self.alerts = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.failed_snatch_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.auth_server = None
        self.announcements = None
        self.api = None

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # event loop policy that allows loop creation on any thread
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

        # scheduler
        self.scheduler = BackgroundScheduler({'apscheduler.timezone': 'UTC'})

        # init core classes
        self.api = API()
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.announcements = Announcements()

        # authorization sso client
        self.auth_server = AuthServer()

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning("Shutting down as SiCKRAGE needs some space to work. "
                                 "You'll get corrupted data otherwise. Only %sMB left", available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            self.log.info('Performing restore of backup files')
            success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                # self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
                # self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'.format(
                                      'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # init encryption public and private keys
        encryption.initialize()

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # perform integrity check
            self.log.info("Performing integrity check on {} database".format(db.name))
            db.integrity_check()

            # migrate database
            self.log.info("Performing migrations on {} database".format(db.name))
            db.migrate()

            # upgrade database
            self.log.info("Performing upgrades on {} database".format(db.name))
            db.upgrade()

            # cleanup
            self.log.info("Performing cleanup on {} database".format(db.name))
            db.cleanup()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq

        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age

        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'

        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.task,
            IntervalTrigger(hours=self.config.version_updater_freq, timezone='utc'),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.task,
            IntervalTrigger(days=1, timezone='utc'),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.task,
            IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour),
                            timezone='utc'),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.task,
            IntervalTrigger(minutes=15, timezone='utc'),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.task,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.task,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.task,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() + datetime.timedelta(minutes=30),
                            timezone='utc'),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.task,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq, timezone='utc'),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.task,
            IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90,
                                     '4h': 4 * 60, 'daily': 24 * 60}[self.config.proper_searcher_interval],
                            timezone='utc'),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.task,
            IntervalTrigger(hours=1, timezone='utc'),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.task,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq, timezone='utc'),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.task,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime, timezone='utc'),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add announcements job
        self.scheduler.add_job(
            self.announcements.task,
            IntervalTrigger(minutes=15, timezone='utc'),
            name=self.announcements.name,
            id=self.announcements.name
        )

        # add provider URL update job
        self.scheduler.add_job(
            self.search_providers.task,
            IntervalTrigger(hours=1, timezone='utc'),
            name=self.search_providers.name,
            id=self.search_providers.name
        )

        # start queues
        self.search_queue.start_worker(self.config.max_queue_workers)
        self.show_queue.start_worker(self.config.max_queue_workers)
        self.postprocessor_queue.start_worker(self.config.max_queue_workers)

        # start web server
        self.wserver.start()

        # fire off jobs now
        self.scheduler.get_job(self.version_updater.name).modify(next_run_time=datetime.datetime.utcnow())
        self.scheduler.get_job(self.tz_updater.name).modify(next_run_time=datetime.datetime.utcnow())
        self.scheduler.get_job(self.announcements.name).modify(next_run_time=datetime.datetime.utcnow())
        self.scheduler.get_job(self.search_providers.name).modify(next_run_time=datetime.datetime.utcnow())

        # start scheduler service
        self.scheduler.start()

        # load shows
        self.scheduler.add_job(self.load_shows)

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.scheduler.add_job(launch_browser, args=[('http', 'https')[sickrage.app.config.enable_https],
                                                         sickrage.app.config.web_host,
                                                         sickrage.app.config.web_port])

        self.log.info("SiCKRAGE :: STARTED")
        self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(sickrage.version()))
        self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(self.config.config_version))
        self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(self.main_db.version))
        self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(self.db_type))
        self.log.info("SiCKRAGE :: URL:[{}://{}:{}/{}]".format(
            ('http', 'https')[self.config.enable_https],
            (self.config.web_host, get_lan_ip())[self.config.web_host == '0.0.0.0'],
            self.config.web_port,
            self.config.web_root))

    def load_shows(self):
        threading.currentThread().setName('CORE')

        session = self.main_db.session()

        self.log.info('Loading initial shows list')

        self.loading_shows = True
        self.shows = {}

        for query in session.query(MainDB.TVShow).with_entities(MainDB.TVShow.indexer_id,
                                                                MainDB.TVShow.indexer,
                                                                MainDB.TVShow.name):
            try:
                self.log.info('Loading show {}'.format(query.name))
                self.shows.update({(query.indexer_id, query.indexer): TVShow(query.indexer_id, query.indexer)})
            except Exception as e:
                self.log.debug('There was an error loading show: {}'.format(query.name))

        self.loading_shows = False

        self.log.info('Loading initial shows list finished')

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS {}!!!'.format(('SHUTTING DOWN', 'RESTARTING')[restart]))

            # shutdown scheduler
            if self.scheduler:
                try:
                    self.scheduler.shutdown()
                except (SchedulerNotRunningError, RuntimeError):
                    pass

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # stop queues
            self.search_queue.shutdown()
            self.show_queue.shutdown()
            self.postprocessor_queue.shutdown()

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save shows
            self.log.info('Saving all shows to the database')
            for show in self.shows.values():
                show.save()

            # save settings
            self.config.save()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False
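# Editorial sketch: the revision above drives every background task through
# APScheduler -- each callable is registered with an IntervalTrigger, selected
# jobs are nudged to run immediately via Job.modify(next_run_time=...), and the
# scheduler runs the rest on their intervals. A minimal self-contained
# illustration of that pattern (the 'DEMO' job id and the lambda are
# hypothetical, not from this codebase):


def _demo_interval_job():
    import datetime

    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.triggers.interval import IntervalTrigger

    scheduler = BackgroundScheduler({'apscheduler.timezone': 'UTC'})

    # register a job that would normally fire every 15 minutes
    scheduler.add_job(lambda: print('tick'), IntervalTrigger(minutes=15), name='DEMO', id='DEMO')
    scheduler.start()

    # fire the job right away instead of waiting out the first interval
    scheduler.get_job('DEMO').modify(next_run_time=datetime.datetime.utcnow())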
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = None
        self.pid = os.getpid()
        self.showlist = []

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
                                     1: [" - " + x for x in self.naming_ep_type],
                                     2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.event_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: {}!".format(("FAILED", "SUCCESSFUL")[success]))
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'.format(
                                      'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error("Shutting down as SiCKRAGE needs some space to work. "
                               "You'll get corrupted data otherwise. Only %sMB left", available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq

        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age

        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'

        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(days=1),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(minutes=15),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1, start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90,
                                     '4h': 4 * 60, 'daily': 24 * 60}[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(hours=1),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(days=1),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(('http', 'https')[sickrage.app.config.enable_https],
                                                               sickrage.app.config.web_host,
                                                               sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # shutdown event queue
            if self.event_queue:
                self.log.debug("Shutting down event queue")
                self.event_queue.shutdown()
                del self.event_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        if self.io_loop:
            self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.save_to_db()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """Populates the showlist and quicksearch cache with shows and episodes from the database"""
        self.quicksearch_cache.load()
        for dbData in self.main_db.all('tv_shows'):
            show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))
            try:
                self.log.debug("Loading data for show: [{}]".format(show.name))
                self.showlist.append(show)
                self.quicksearch_cache.add_show(show.indexerid)
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" % (show.location, str(e)))
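# Editorial sketch: each revision above converts the proper-search interval
# token to minutes with an inline dict lookup keyed by the config value. An
# equivalent standalone helper (the names below are illustrative, not part of
# this codebase):

PROPER_SEARCH_INTERVALS = {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}


def proper_search_interval_minutes(token, default='daily'):
    """Return the proper-search interval in minutes, falling back to daily for unknown tokens."""
    return PROPER_SEARCH_INTERVALS.get(token, PROPER_SEARCH_INTERVALS[default])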
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop().instance()
        self.pid = os.getpid()
        self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.sys_encoding = get_sys_encoding()
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.showlist = []

        self.api = None
        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = None
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.failed_db = None
        self.scheduler = None
        self.wserver = None
        self.wsession = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None

        # patch modules with encoding kludge
        patch_modules()

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = BackgroundScheduler()
        self.wserver = WebServer()
        self.wsession = WebSession()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()

        # check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                                 os.path.join(self.data_dir, '{}.bak-{}'.format(
                                     'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
            helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                             os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error("Shutting down as SiCKRAGE needs some space to work. "
                               "You'll get corrupted data otherwise. Only %sMB left", available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq

        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'

        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER"
        )

        # add network timezones updater job
        self.scheduler.add_job(
            update_network_dict,
            IntervalTrigger(days=1),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)),
            name="SHOWUPDATER",
            id="SHOWUPDATER"
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER"
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
            name="BACKLOG",
            id="BACKLOG"
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR"
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90,
                                     '4h': 4 * 60, 'daily': 24 * 60}[self.config.proper_searcher_interval]),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER"
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(hours=1),
            name="TRAKTSEARCHER",
            id="TRAKTSEARCHER"
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER"
        )

        # start scheduler service
        self.scheduler.start()

        # pause or resume the PROPERSEARCHER job based on the download_propers setting
        (self.scheduler.get_job('PROPERSEARCHER').pause,
         self.scheduler.get_job('PROPERSEARCHER').resume)[self.config.download_propers]()

        # pause or resume the TRAKTSEARCHER job based on the use_trakt setting
        (self.scheduler.get_job('TRAKTSEARCHER').pause,
         self.scheduler.get_job('TRAKTSEARCHER').resume)[self.config.use_trakt]()

        # pause or resume the SUBTITLESEARCHER job based on the use_subtitles setting
        (self.scheduler.get_job('SUBTITLESEARCHER').pause,
         self.scheduler.get_job('SUBTITLESEARCHER').resume)[self.config.use_subtitles]()

        # pause or resume the POSTPROCESSOR job based on the process_automatically setting
        (self.scheduler.get_job('POSTPROCESSOR').pause,
         self.scheduler.get_job('POSTPROCESSOR').resume)[self.config.process_automatically]()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db, self.failed_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """Populates the showlist with shows from the database"""
        for dbData in [x['doc'] for x in self.main_db.db.all('tv_shows', with_doc=True)]:
            try:
                self.log.debug("Loading data for show: [{}]".format(dbData['show_name']))
                show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))
                show.nextEpisode()
                self.showlist += [show]
            except Exception as e:
                self.log.error("Show error in [%s]: %s" % (dbData['location'], e))
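# NOTE (editorial): every revision above restarts by replacing the current
# process image: os.execl(sys.executable, sys.executable, *sys.argv). The first
# sys.executable is the binary to execute; the second becomes argv[0] of the new
# process, which is why the interpreter path is deliberately passed twice.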