def __init__(self, *args, **kwargs):
    """Set up test fixtures: paths, a fake episode file name, and a minimal
    sickrage.app instance backed by an on-disk sqlite test database.

    Fix: season/episode numbers are now zero-padded with ``{:02d}`` instead of
    hard-coded ``"s0" + str(...)``, so the generated file name stays correct
    if SEASON/EPISODE are ever raised to 10 or above (output is unchanged for
    the current values 4/2).
    """
    super(SiCKRAGETestCase, self).__init__(*args, **kwargs)

    threading.currentThread().setName('TESTS')

    self.TESTALL = False
    # suites excluded from full runs (network/SSL dependent)
    self.TESTSKIPPED = ['test_issue_submitter', 'test_ssl_sni']

    # test sandbox locations, all rooted at this file's directory
    self.TESTDIR = os.path.abspath(os.path.dirname(__file__))
    self.TESTDB_DIR = os.path.join(self.TESTDIR, 'database')
    self.TESTDBBACKUP_DIR = os.path.join(self.TESTDIR, 'db_backup')
    self.TEST_CONFIG = os.path.join(self.TESTDIR, 'config.ini')

    # fake show/episode used by naming and post-processing tests
    self.SHOWNAME = "show name"
    self.SEASON = 4
    self.EPISODE = 2
    # zero-pad so e.g. season 12 becomes "s12", not the broken "s012"
    self.FILENAME = "{} - s{:02d}e{:02d}.mkv".format(self.SHOWNAME, self.SEASON, self.EPISODE)
    self.FILEDIR = os.path.join(self.TESTDIR, self.SHOWNAME)
    self.FILEPATH = os.path.join(self.FILEDIR, self.FILENAME)
    self.SHOWDIR = os.path.join(self.TESTDIR, self.SHOWNAME + " final")

    # build a minimal application object for the tests to run against
    sickrage.app = Core()
    sickrage.app.search_providers = SearchProviders()
    sickrage.app.log = Logger()
    sickrage.app.config = Config()
    sickrage.app.web_host = '0.0.0.0'
    sickrage.app.data_dir = self.TESTDIR
    sickrage.app.config_file = self.TEST_CONFIG
    # sqlite ignores host/port/credentials; they only satisfy the MainDB signature
    sickrage.app.main_db = MainDB(db_type='sqlite',
                                  db_prefix='sickrage',
                                  db_host='localhost',
                                  db_port='3306',
                                  db_username='******',
                                  db_password='******')

    encryption.initialize()

    sickrage.app.config.load()
    sickrage.app.config.naming_pattern = 'Season.%0S/%S.N.S%0SE%0E.%E.N'
    sickrage.app.config.tv_download_dir = os.path.join(self.TESTDIR, 'Downloads')

    # stub out episode population so tests never hit the indexers
    episode.TVEpisode.populate_episode = self._fake_specify_ep
def __init__(self, *args, **kwargs):
    """Set up test fixtures: paths, a fake episode file name, and a minimal
    sickrage.app instance with search providers loaded.

    Fix: season/episode numbers are now zero-padded with ``{:02d}`` instead of
    hard-coded ``"s0" + str(...)``, so the generated file name stays correct
    if SEASON/EPISODE are ever raised to 10 or above (output is unchanged for
    the current values 4/2).
    """
    super(SiCKRAGETestCase, self).__init__(*args, **kwargs)

    threading.currentThread().setName('TESTS')

    self.TESTALL = False
    # suites excluded from full runs (network/SSL dependent)
    self.TESTSKIPPED = ['test_issue_submitter', 'test_ssl_sni']

    # test sandbox locations, all rooted at this file's directory
    self.TESTDIR = os.path.abspath(os.path.dirname(__file__))
    self.TESTDB_DIR = os.path.join(self.TESTDIR, 'database')
    self.TESTDBBACKUP_DIR = os.path.join(self.TESTDIR, 'db_backup')

    # fake show/episode used by naming and post-processing tests
    self.SHOWNAME = "show name"
    self.SEASON = 4
    self.EPISODE = 2
    # zero-pad so e.g. season 12 becomes "s12", not the broken "s012"
    self.FILENAME = "{} - s{:02d}e{:02d}.mkv".format(self.SHOWNAME, self.SEASON, self.EPISODE)
    self.FILEDIR = os.path.join(self.TESTDIR, self.SHOWNAME)
    self.FILEPATH = os.path.join(self.FILEDIR, self.FILENAME)
    self.SHOWDIR = os.path.join(self.TESTDIR, self.SHOWNAME + " final")

    # build a minimal application object for the tests to run against
    sickrage.app = Core()
    sickrage.app.log = Logger()
    sickrage.app.data_dir = self.TESTDIR
    sickrage.app.config = Config()
    sickrage.app.config.quality_default = 4  # hdtv
    sickrage.app.config.naming_multi_ep = True
    sickrage.app.config.tv_download_dir = os.path.join(self.TESTDIR, 'Downloads')
    sickrage.app.config.ignore_words = "German,Core2HD"
    sickrage.app.search_providers = SearchProviders()
    sickrage.app.name_cache = NameCache()

    sickrage.app.search_providers.load()

    # stub out episode population so tests never hit the indexers
    episode.TVEpisode.populateEpisode = self._fake_specify_ep
def start(self):
    """Boot the SiCKRAGE core: construct all services, restore/migrate data,
    load config, start logging, prepare databases, schedule background jobs,
    start queues and the web server, then enter the Tornado IO loop.

    Statement order matters throughout: config must load before the logger
    starts, databases must be ready before the name cache and shows load, and
    the IO loop start is last because it blocks.
    """
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # patch modules with encoding kludge
    patch_modules()

    # init core classes
    self.notifier_providers = NotifierProviders()
    self.metadata_providers = MetadataProviders()
    self.search_providers = SearchProviders()
    self.log = Logger()
    self.config = Config()
    self.alerts = Notifications()
    self.main_db = MainDB()
    self.cache_db = CacheDB()
    self.scheduler = TornadoScheduler()
    self.wserver = WebServer()
    self.name_cache = NameCache()
    self.show_queue = ShowQueue()
    self.search_queue = SearchQueue()
    self.postprocessor_queue = PostProcessorQueue()
    self.version_updater = VersionUpdater()
    self.show_updater = ShowUpdater()
    self.daily_searcher = DailySearcher()
    self.failed_snatch_searcher = FailedSnatchSearcher()
    self.backlog_searcher = BacklogSearcher()
    self.proper_searcher = ProperSearcher()
    self.trakt_searcher = TraktSearcher()
    self.subtitle_searcher = SubtitleSearcher()
    self.auto_postprocessor = AutoPostProcessor()
    self.upnp_client = UPNPClient()
    self.quicksearch_cache = QuicksearchCache()

    # setup oidc client
    realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
    self.oidc_client = realm.open_id_connect(
        client_id='sickrage-app',
        client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
        # print (not self.log): the logger has not been started yet at this point
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
            # keep the existing sickrage.db as a timestamped backup before overwriting
            helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                             os.path.join(self.data_dir,
                                          '{}.bak-{}'.format('sickrage.db',
                                                             datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
        helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                         os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

    # load config
    self.config.load()

    # set language
    self.config.change_gui_lang(self.config.gui_lang)

    # set socket timeout
    socket.setdefaulttimeout(self.config.socket_timeout)

    # setup logger settings
    self.log.logSize = self.config.log_size
    self.log.logNr = self.config.log_nr
    self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
    self.log.debugLogging = self.config.debug
    # NOTE(review): attribute is spelled 'quite' in this revision ('quiet' in
    # later ones) — must match the attribute defined elsewhere on this class.
    self.log.consoleLogging = not self.quite

    # start logger
    self.log.start()

    # user agent
    if self.config.random_user_agent:
        self.user_agent = UserAgent().random

    # teach urlparse/urllib about scgi URLs and our UA (Python 2 APIs)
    urlparse.uses_netloc.append('scgi')
    urllib.FancyURLopener.version = self.user_agent

    # set torrent client web url
    torrent_webui_url(True)

    # Check available space
    try:
        total_space, available_space = getFreeSpace(self.data_dir)
        if available_space < 100:
            # abort startup entirely when below ~100MB free
            self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                           'otherwise. Only %sMB left', available_space)
            return
    except Exception:
        self.log.error('Failed getting disk space: %s', traceback.format_exc())

    # perform database startup actions
    for db in [self.main_db, self.cache_db]:
        # initialize database
        db.initialize()

        # check integrity of database
        db.check_integrity()

        # migrate database
        db.migrate()

        # misc database cleanups
        db.cleanup()

        # upgrade database
        db.upgrade()

    # compact main database
    if self.config.last_db_compact < time.time() - 604800:  # 7 days
        self.main_db.compact()
        self.config.last_db_compact = int(time.time())

    # load name cache
    self.name_cache.load()

    # load data for shows from database
    self.load_shows()

    if self.config.default_page not in ('schedule', 'history', 'IRC'):
        self.config.default_page = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection
    if self.config.use_anidb:
        def anidb_logger(msg):
            return self.log.debug("AniDB: {} ".format(msg))

        try:
            self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
            self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
        except Exception as e:
            self.log.warning("AniDB exception msg: %r " % repr(e))

    # sanitize config values that may have been hand-edited out of range
    if self.config.web_port < 21 or self.config.web_port > 65535:
        self.config.web_port = 8081

    if not self.config.web_cookie_secret:
        self.config.web_cookie_secret = generate_secret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.config.anon_redirect.endswith('?'):
        self.config.anon_redirect = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
        self.config.root_dirs = ''

    self.config.naming_force_folders = check_force_season_folders()

    if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.config.nzb_method = 'blackhole'

    if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                          'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.config.torrent_method = 'blackhole'

    # clamp every scheduler frequency to its configured minimum
    if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
        self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

    if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
        self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

    self.config.min_backlog_searcher_freq = get_backlog_cycle_time()
    if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
        self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

    if self.config.version_updater_freq < self.config.min_version_updater_freq:
        self.config.version_updater_freq = self.config.min_version_updater_freq

    if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
        self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

    if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
        self.config.failed_snatch_age = self.config.min_failed_snatch_age

    if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
        self.config.proper_searcher_interval = 'daily'

    if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
        self.config.showupdate_hour = 0

    if self.config.subtitles_languages[0] == '':
        self.config.subtitles_languages = []

    # add version checker job
    self.scheduler.add_job(
        self.version_updater.run,
        IntervalTrigger(hours=self.config.version_updater_freq),
        name=self.version_updater.name,
        id=self.version_updater.name)

    # add network timezones updater job
    self.scheduler.add_job(update_network_dict,
                           IntervalTrigger(days=1),
                           name="TZUPDATER",
                           id="TZUPDATER")

    # add show updater job
    self.scheduler.add_job(self.show_updater.run,
                           IntervalTrigger(days=1,
                                           start_date=datetime.datetime.now().replace(
                                               hour=self.config.showupdate_hour)),
                           name=self.show_updater.name,
                           id=self.show_updater.name)

    # add daily search job (delayed 4 minutes so startup finishes first)
    self.scheduler.add_job(
        self.daily_searcher.run,
        IntervalTrigger(minutes=self.config.daily_searcher_freq,
                        start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
        name=self.daily_searcher.name,
        id=self.daily_searcher.name)

    # add failed snatch search job
    self.scheduler.add_job(
        self.failed_snatch_searcher.run,
        IntervalTrigger(hours=1,
                        start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
        name=self.failed_snatch_searcher.name,
        id=self.failed_snatch_searcher.name)

    # add backlog search job (delayed 30 minutes)
    self.scheduler.add_job(
        self.backlog_searcher.run,
        IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                        start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
        name=self.backlog_searcher.name,
        id=self.backlog_searcher.name)

    # add auto-postprocessing job
    self.scheduler.add_job(
        self.auto_postprocessor.run,
        IntervalTrigger(minutes=self.config.autopostprocessor_freq),
        name=self.auto_postprocessor.name,
        id=self.auto_postprocessor.name)

    # add find proper job (interval name mapped to minutes)
    self.scheduler.add_job(
        self.proper_searcher.run,
        IntervalTrigger(minutes={
            '15m': 15,
            '45m': 45,
            '90m': 90,
            '4h': 4 * 60,
            'daily': 24 * 60
        }[self.config.proper_searcher_interval]),
        name=self.proper_searcher.name,
        id=self.proper_searcher.name)

    # add trakt.tv checker job
    self.scheduler.add_job(self.trakt_searcher.run,
                           IntervalTrigger(hours=1),
                           name=self.trakt_searcher.name,
                           id=self.trakt_searcher.name)

    # add subtitles finder job
    self.scheduler.add_job(
        self.subtitle_searcher.run,
        IntervalTrigger(hours=self.config.subtitle_searcher_freq),
        name=self.subtitle_searcher.name,
        id=self.subtitle_searcher.name)

    # add upnp client job (re-runs before the NAT port mapping expires)
    self.scheduler.add_job(
        self.upnp_client.run,
        IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
        name=self.upnp_client.name,
        id=self.upnp_client.name)

    # start scheduler service
    self.scheduler.start()

    # start queue's
    self.search_queue.start()
    self.show_queue.start()
    self.postprocessor_queue.start()

    # start webserver
    self.wserver.start()

    # start ioloop (blocks until the application shuts down)
    self.io_loop.start()
def start(self):
    """Boot the SiCKRAGE core on the current Tornado IO loop: construct all
    services, restore/migrate data, load config, start logging, prepare
    databases, schedule background jobs, start queues and the web server,
    then run the IO loop.

    Statement order matters throughout: config must load before the logger
    starts, and the IO loop start is last because it blocks.
    """
    self.started = True

    self.io_loop = IOLoop.current()

    # thread name
    threading.currentThread().setName('CORE')

    # init core classes
    self.main_db = MainDB(self.db_type,
                          self.db_prefix,
                          self.db_host,
                          self.db_port,
                          self.db_username,
                          self.db_password)
    self.cache_db = CacheDB(self.db_type,
                            self.db_prefix,
                            self.db_host,
                            self.db_port,
                            self.db_username,
                            self.db_password)
    self.notifier_providers = NotifierProviders()
    self.metadata_providers = MetadataProviders()
    self.search_providers = SearchProviders()
    self.log = Logger()
    self.config = Config()
    self.alerts = Notifications()
    self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
    self.wserver = WebServer()
    self.name_cache = NameCache()
    self.show_queue = ShowQueue()
    self.search_queue = SearchQueue()
    self.postprocessor_queue = PostProcessorQueue()
    self.version_updater = VersionUpdater()
    self.show_updater = ShowUpdater()
    self.tz_updater = TimeZoneUpdater()
    self.rsscache_updater = RSSCacheUpdater()
    self.daily_searcher = DailySearcher()
    self.failed_snatch_searcher = FailedSnatchSearcher()
    self.backlog_searcher = BacklogSearcher()
    self.proper_searcher = ProperSearcher()
    self.trakt_searcher = TraktSearcher()
    self.subtitle_searcher = SubtitleSearcher()
    self.auto_postprocessor = AutoPostProcessor()
    self.upnp_client = UPNPClient()
    self.quicksearch_cache = QuicksearchCache()

    # setup oidc client
    realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
    self.oidc_client = realm.open_id_connect(client_id=self.oidc_client_id,
                                             client_secret=self.oidc_client_secret)

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
        success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
        self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
            # keep the existing sickrage.db as a timestamped backup before overwriting
            helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                              os.path.join(self.data_dir, '{}.bak-{}'
                                           .format('sickrage.db',
                                                   datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))

        helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                          os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

    # init encryption public and private keys
    encryption.initialize()

    # load config
    self.config.load()

    # set language
    self.config.change_gui_lang(self.config.gui_lang)

    # set socket timeout
    socket.setdefaulttimeout(self.config.socket_timeout)

    # setup logger settings
    self.log.logSize = self.config.log_size
    self.log.logNr = self.config.log_nr
    self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
    self.log.debugLogging = self.config.debug
    self.log.consoleLogging = not self.quiet

    # start logger
    self.log.start()

    # user agent
    if self.config.random_user_agent:
        self.user_agent = UserAgent().random

    # teach urllib about scgi URLs and our UA
    uses_netloc.append('scgi')
    FancyURLopener.version = self.user_agent

    # set torrent client web url
    torrent_webui_url(True)

    # Check available space
    try:
        total_space, available_space = get_free_space(self.data_dir)
        if available_space < 100:
            # abort startup entirely when below ~100MB free
            self.log.warning('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
            return
    except Exception:
        self.log.error('Failed getting disk space: %s', traceback.format_exc())

    # perform database startup actions
    for db in [self.main_db, self.cache_db]:
        # perform integrity check
        db.integrity_check()

        # migrate database
        db.migrate()

        # sync database repo
        db.sync_db_repo()

        # cleanup
        db.cleanup()

    # load name cache
    self.name_cache.load()

    if self.config.default_page not in ('schedule', 'history', 'IRC'):
        self.config.default_page = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
        except Exception:
            continue

    # sanitize config values that may have been hand-edited out of range
    if self.config.web_port < 21 or self.config.web_port > 65535:
        self.config.web_port = 8081

    if not self.config.web_cookie_secret:
        self.config.web_cookie_secret = generate_secret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.config.anon_redirect.endswith('?'):
        self.config.anon_redirect = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
        self.config.root_dirs = ''

    self.config.naming_force_folders = check_force_season_folders()

    if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.config.nzb_method = 'blackhole'

    if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                          'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.config.torrent_method = 'blackhole'

    # clamp every scheduler frequency to its configured minimum
    if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
        self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

    if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
        self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

    if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
        self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

    if self.config.version_updater_freq < self.config.min_version_updater_freq:
        self.config.version_updater_freq = self.config.min_version_updater_freq

    if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
        self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

    if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
        self.config.failed_snatch_age = self.config.min_failed_snatch_age

    if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
        self.config.proper_searcher_interval = 'daily'

    if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
        self.config.showupdate_hour = 0

    # add API token refresh job
    self.scheduler.add_job(
        API().refresh_token,
        IntervalTrigger(
            hours=1,
        ),
        name='SR-API',
        id='SR-API'
    )

    # add version checker job
    self.scheduler.add_job(
        self.version_updater.run,
        IntervalTrigger(
            hours=self.config.version_updater_freq,
        ),
        name=self.version_updater.name,
        id=self.version_updater.name
    )

    # add network timezones updater job
    self.scheduler.add_job(
        self.tz_updater.run,
        IntervalTrigger(
            days=1,
        ),
        name=self.tz_updater.name,
        id=self.tz_updater.name
    )

    # add show updater job
    self.scheduler.add_job(
        self.show_updater.run,
        IntervalTrigger(
            days=1,
            start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
        ),
        name=self.show_updater.name,
        id=self.show_updater.name
    )

    # add rss cache updater job
    self.scheduler.add_job(
        self.rsscache_updater.run,
        IntervalTrigger(
            minutes=15,
        ),
        name=self.rsscache_updater.name,
        id=self.rsscache_updater.name
    )

    # add daily search job (delayed 4 minutes so startup finishes first)
    self.scheduler.add_job(
        self.daily_searcher.run,
        IntervalTrigger(
            minutes=self.config.daily_searcher_freq,
            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
        ),
        name=self.daily_searcher.name,
        id=self.daily_searcher.name
    )

    # add failed snatch search job
    self.scheduler.add_job(
        self.failed_snatch_searcher.run,
        IntervalTrigger(
            hours=1,
            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
        ),
        name=self.failed_snatch_searcher.name,
        id=self.failed_snatch_searcher.name
    )

    # add backlog search job (delayed 30 minutes)
    self.scheduler.add_job(
        self.backlog_searcher.run,
        IntervalTrigger(
            minutes=self.config.backlog_searcher_freq,
            start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
        ),
        name=self.backlog_searcher.name,
        id=self.backlog_searcher.name
    )

    # add auto-postprocessing job
    self.scheduler.add_job(
        self.auto_postprocessor.run,
        IntervalTrigger(
            minutes=self.config.autopostprocessor_freq
        ),
        name=self.auto_postprocessor.name,
        id=self.auto_postprocessor.name
    )

    # add find proper job (interval name mapped to minutes)
    self.scheduler.add_job(
        self.proper_searcher.run,
        IntervalTrigger(
            minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]
        ),
        name=self.proper_searcher.name,
        id=self.proper_searcher.name
    )

    # add trakt.tv checker job
    self.scheduler.add_job(
        self.trakt_searcher.run,
        IntervalTrigger(
            hours=1
        ),
        name=self.trakt_searcher.name,
        id=self.trakt_searcher.name
    )

    # add subtitles finder job
    self.scheduler.add_job(
        self.subtitle_searcher.run,
        IntervalTrigger(
            hours=self.config.subtitle_searcher_freq
        ),
        name=self.subtitle_searcher.name,
        id=self.subtitle_searcher.name
    )

    # add upnp client job (re-runs before the NAT port mapping expires)
    self.scheduler.add_job(
        self.upnp_client.run,
        IntervalTrigger(
            seconds=self.upnp_client._nat_portmap_lifetime
        ),
        name=self.upnp_client.name,
        id=self.upnp_client.name
    )

    # add namecache update job
    self.scheduler.add_job(
        self.name_cache.build_all,
        IntervalTrigger(
            days=1,
        ),
        name=self.name_cache.name,
        id=self.name_cache.name
    )

    # start scheduler service
    self.scheduler.start()

    # start queue's
    self.io_loop.add_callback(self.search_queue.watch)
    self.io_loop.add_callback(self.show_queue.watch)
    self.io_loop.add_callback(self.postprocessor_queue.watch)

    # fire off startup events
    self.io_loop.run_in_executor(None, self.quicksearch_cache.run)
    self.io_loop.run_in_executor(None, self.name_cache.run)
    self.io_loop.run_in_executor(None, self.version_updater.run)
    self.io_loop.run_in_executor(None, self.tz_updater.run)

    # start web server
    self.wserver.start()

    # launch browser window
    if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
        self.io_loop.run_in_executor(None,
                                     functools.partial(launch_browser,
                                                       ('http', 'https')[sickrage.app.config.enable_https],
                                                       sickrage.app.config.web_host,
                                                       sickrage.app.config.web_port))

    # NOTE(review): this local function shadows the self.started flag set above;
    # it only logs the startup banner once the IO loop is running.
    def started():
        self.log.info("SiCKRAGE :: STARTED")
        self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(sickrage.version()))
        self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(self.config.config_version))
        self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(self.main_db.version))
        self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(self.db_type))
        self.log.info("SiCKRAGE :: URL:[{}://{}:{}{}]".format(('http', 'https')[self.config.enable_https],
                                                              self.config.web_host, self.config.web_port,
                                                              self.config.web_root))

    # start io_loop (blocks until the application shuts down)
    self.io_loop.add_callback(started)
    self.io_loop.start()
def start(self):
    """Boot the SiCKRAGE core with an APScheduler BackgroundScheduler:
    construct all services, check disk space, restore/migrate data, load
    config, start logging, prepare databases, schedule background jobs,
    start queue workers and the web server, then log the startup banner.

    Statement order matters throughout: config must load before the logger
    starts, and jobs are nudged to run immediately before the scheduler
    starts.
    """
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # event loop policy that allows loop creation on any thread.
    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    # scheduler
    self.scheduler = BackgroundScheduler({'apscheduler.timezone': 'UTC'})

    # init core classes
    self.api = API()
    self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username,
                          self.db_password)
    self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username,
                            self.db_password)
    self.notifier_providers = NotifierProviders()
    self.metadata_providers = MetadataProviders()
    self.search_providers = SearchProviders()
    self.log = Logger()
    self.config = Config()
    self.alerts = Notifications()
    self.wserver = WebServer()
    self.show_queue = ShowQueue()
    self.search_queue = SearchQueue()
    self.postprocessor_queue = PostProcessorQueue()
    self.version_updater = VersionUpdater()
    self.show_updater = ShowUpdater()
    self.tz_updater = TimeZoneUpdater()
    self.rsscache_updater = RSSCacheUpdater()
    self.daily_searcher = DailySearcher()
    self.failed_snatch_searcher = FailedSnatchSearcher()
    self.backlog_searcher = BacklogSearcher()
    self.proper_searcher = ProperSearcher()
    self.trakt_searcher = TraktSearcher()
    self.subtitle_searcher = SubtitleSearcher()
    self.auto_postprocessor = AutoPostProcessor()
    self.upnp_client = UPNPClient()
    self.announcements = Announcements()

    # authorization sso client
    self.auth_server = AuthServer()

    # check available space
    try:
        self.log.info("Performing disk space checks")
        total_space, available_space = get_free_space(self.data_dir)
        if available_space < 100:
            # abort startup entirely when below ~100MB free
            self.log.warning('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
            return
    except Exception:
        self.log.error('Failed getting disk space: %s', traceback.format_exc())

    # check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
        self.log.info('Performing restore of backup files')
        success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
        self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            # self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
            # self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
            shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
            # keep the existing sickrage.db as a timestamped backup before overwriting
            helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                              os.path.join(self.data_dir, '{}.bak-{}'
                                           .format('sickrage.db',
                                                   datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))

        helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                          os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

    # init encryption public and private keys
    encryption.initialize()

    # load config
    self.config.load()

    # set language
    self.config.change_gui_lang(self.config.gui_lang)

    # set socket timeout
    socket.setdefaulttimeout(self.config.socket_timeout)

    # setup logger settings
    self.log.logSize = self.config.log_size
    self.log.logNr = self.config.log_nr
    self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
    self.log.debugLogging = self.config.debug
    self.log.consoleLogging = not self.quiet

    # start logger
    self.log.start()

    # perform database startup actions
    for db in [self.main_db, self.cache_db]:
        # perform integrity check
        self.log.info("Performing integrity check on {} database".format(db.name))
        db.integrity_check()

        # migrate database
        self.log.info("Performing migrations on {} database".format(db.name))
        db.migrate()

        # upgrade database
        self.log.info("Performing upgrades on {} database".format(db.name))
        db.upgrade()

        # cleanup
        self.log.info("Performing cleanup on {} database".format(db.name))
        db.cleanup()

    # user agent
    if self.config.random_user_agent:
        self.user_agent = UserAgent().random

    # teach urllib about scgi URLs and our UA
    uses_netloc.append('scgi')
    FancyURLopener.version = self.user_agent

    # set torrent client web url
    torrent_webui_url(True)

    if self.config.default_page not in ('schedule', 'history', 'IRC'):
        self.config.default_page = 'home'

    # attempt to help prevent users from breaking links by using a bad url
    if not self.config.anon_redirect.endswith('?'):
        self.config.anon_redirect = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
        self.config.root_dirs = ''

    self.config.naming_force_folders = check_force_season_folders()

    if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.config.nzb_method = 'blackhole'

    if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                          'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.config.torrent_method = 'blackhole'

    # clamp every scheduler frequency to its configured minimum
    if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
        self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

    if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
        self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

    if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
        self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

    if self.config.version_updater_freq < self.config.min_version_updater_freq:
        self.config.version_updater_freq = self.config.min_version_updater_freq

    if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
        self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

    if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
        self.config.failed_snatch_age = self.config.min_failed_snatch_age

    if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
        self.config.proper_searcher_interval = 'daily'

    if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
        self.config.showupdate_hour = 0

    # add version checker job
    self.scheduler.add_job(
        self.version_updater.task,
        IntervalTrigger(
            hours=self.config.version_updater_freq,
            timezone='utc'
        ),
        name=self.version_updater.name,
        id=self.version_updater.name
    )

    # add network timezones updater job
    self.scheduler.add_job(
        self.tz_updater.task,
        IntervalTrigger(
            days=1,
            timezone='utc'
        ),
        name=self.tz_updater.name,
        id=self.tz_updater.name
    )

    # add show updater job
    self.scheduler.add_job(
        self.show_updater.task,
        IntervalTrigger(
            days=1,
            start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour),
            timezone='utc'
        ),
        name=self.show_updater.name,
        id=self.show_updater.name
    )

    # add rss cache updater job
    self.scheduler.add_job(
        self.rsscache_updater.task,
        IntervalTrigger(
            minutes=15,
            timezone='utc'
        ),
        name=self.rsscache_updater.name,
        id=self.rsscache_updater.name
    )

    # add daily search job (delayed 4 minutes so startup finishes first)
    self.scheduler.add_job(
        self.daily_searcher.task,
        IntervalTrigger(
            minutes=self.config.daily_searcher_freq,
            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
            timezone='utc'
        ),
        name=self.daily_searcher.name,
        id=self.daily_searcher.name
    )

    # add failed snatch search job
    self.scheduler.add_job(
        self.failed_snatch_searcher.task,
        IntervalTrigger(
            hours=1,
            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
            timezone='utc'
        ),
        name=self.failed_snatch_searcher.name,
        id=self.failed_snatch_searcher.name
    )

    # add backlog search job (delayed 30 minutes)
    self.scheduler.add_job(
        self.backlog_searcher.task,
        IntervalTrigger(
            minutes=self.config.backlog_searcher_freq,
            start_date=datetime.datetime.now() + datetime.timedelta(minutes=30),
            timezone='utc'
        ),
        name=self.backlog_searcher.name,
        id=self.backlog_searcher.name
    )

    # add auto-postprocessing job
    self.scheduler.add_job(
        self.auto_postprocessor.task,
        IntervalTrigger(
            minutes=self.config.autopostprocessor_freq,
            timezone='utc'
        ),
        name=self.auto_postprocessor.name,
        id=self.auto_postprocessor.name
    )

    # add find proper job (interval name mapped to minutes)
    self.scheduler.add_job(
        self.proper_searcher.task,
        IntervalTrigger(
            minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval],
            timezone='utc'
        ),
        name=self.proper_searcher.name,
        id=self.proper_searcher.name
    )

    # add trakt.tv checker job
    self.scheduler.add_job(
        self.trakt_searcher.task,
        IntervalTrigger(
            hours=1,
            timezone='utc'
        ),
        name=self.trakt_searcher.name,
        id=self.trakt_searcher.name
    )

    # add subtitles finder job
    self.scheduler.add_job(
        self.subtitle_searcher.task,
        IntervalTrigger(
            hours=self.config.subtitle_searcher_freq,
            timezone='utc'
        ),
        name=self.subtitle_searcher.name,
        id=self.subtitle_searcher.name
    )

    # add upnp client job (re-runs before the NAT port mapping expires)
    self.scheduler.add_job(
        self.upnp_client.task,
        IntervalTrigger(
            seconds=self.upnp_client._nat_portmap_lifetime,
            timezone='utc'
        ),
        name=self.upnp_client.name,
        id=self.upnp_client.name
    )

    # add announcements job
    self.scheduler.add_job(
        self.announcements.task,
        IntervalTrigger(
            minutes=15,
            timezone='utc'
        ),
        name=self.announcements.name,
        id=self.announcements.name
    )

    # add provider URL update job
    self.scheduler.add_job(
        self.search_providers.task,
        IntervalTrigger(
            hours=1,
            timezone='utc'
        ),
        name=self.search_providers.name,
        id=self.search_providers.name
    )

    # start queues
    self.search_queue.start_worker(self.config.max_queue_workers)
    self.show_queue.start_worker(self.config.max_queue_workers)
    self.postprocessor_queue.start_worker(self.config.max_queue_workers)

    # start web server
    self.wserver.start()

    # fire off jobs now (pull next_run_time forward so they run immediately)
    self.scheduler.get_job(self.version_updater.name).modify(next_run_time=datetime.datetime.utcnow())
    self.scheduler.get_job(self.tz_updater.name).modify(next_run_time=datetime.datetime.utcnow())
    self.scheduler.get_job(self.announcements.name).modify(next_run_time=datetime.datetime.utcnow())
    self.scheduler.get_job(self.search_providers.name).modify(next_run_time=datetime.datetime.utcnow())

    # start scheduler service
    self.scheduler.start()

    # load shows (as a one-shot scheduler job, off the startup thread)
    self.scheduler.add_job(self.load_shows)

    # launch browser window
    if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
        self.scheduler.add_job(launch_browser,
                               args=[('http', 'https')[sickrage.app.config.enable_https],
                                     sickrage.app.config.web_host,
                                     sickrage.app.config.web_port])

    # startup banner; fall back to the LAN IP when bound to all interfaces
    self.log.info("SiCKRAGE :: STARTED")
    self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(sickrage.version()))
    self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(self.config.config_version))
    self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(self.main_db.version))
    self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(self.db_type))
    self.log.info("SiCKRAGE :: URL:[{}://{}:{}/{}]".format(('http', 'https')[self.config.enable_https],
                                                           (self.config.web_host, get_lan_ip())[
                                                               self.config.web_host == '0.0.0.0'],
                                                           self.config.web_port,
                                                           self.config.web_root))