def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
            helpers.moveFile(
                os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                os.path.join(sickrage.DATA_DIR, '{}.bak-{}'.format(
                    'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))

        helpers.moveFile(
            os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')),
            os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

    # load config
    self.srConfig.load()

    # set socket timeout
    socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

    # setup logger settings
    self.srLogger.logSize = self.srConfig.LOG_SIZE
    self.srLogger.logNr = self.srConfig.LOG_NR
    self.srLogger.debugLogging = sickrage.DEBUG
    self.srLogger.consoleLogging = not sickrage.QUITE
    self.srLogger.logFile = self.srConfig.LOG_FILE

    # start logger
    self.srLogger.start()

    # Check available space; refuse to start with less than 100MB free
    try:
        total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
        if available_space < 100:
            self.srLogger.error(
                "Shutting down as SiCKRAGE needs some space to work. You'll get corrupted data otherwise. Only %sMB left",
                available_space)
            sickrage.restart = False
            return
    except Exception:
        self.srLogger.error('Failed getting diskspace: %s', traceback.format_exc())

    # perform database startup actions
    for db in [MainDB, CacheDB, FailedDB]:
        # initialize the database
        db().initialize()

        # migrate the database
        db().migrate()

        # compact the database
        db().compact()

    # load data for shows from database
    self.load_shows()

    # build name cache
    self.NAMECACHE.build()

    if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.srConfig.DEFAULT_PAGE = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection
    if self.srConfig.USE_ANIDB:
        try:
            self.ADBA_CONNECTION = adba.Connection(
                keepAlive=True,
                log=lambda msg: self.srLogger.debug("AniDB: %s " % msg)
            ).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
        except Exception as e:
            self.srLogger.warning("AniDB exception msg: %r " % repr(e))

    if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
        self.srConfig.WEB_PORT = 8081

    if not self.srConfig.WEB_COOKIE_SECRET:
        self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.srConfig.ANON_REDIRECT.endswith('?'):
        self.srConfig.ANON_REDIRECT = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
        self.srConfig.ROOT_DIRS = ''

    self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()

    if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.srConfig.NZB_METHOD = 'blackhole'

    if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                            'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.srConfig.TORRENT_METHOD = 'blackhole'

    if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
        self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

    # clamp scheduler frequencies to their configured minimums
    if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
        self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

    if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
        self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

    if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
        self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
    if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
        self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

    if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
        self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

    if self.srConfig.SHOWUPDATE_HOUR > 23 or self.srConfig.SHOWUPDATE_HOUR < 0:
        self.srConfig.SHOWUPDATE_HOUR = 0

    if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
        self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

    self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

    if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
        self.srConfig.SUBTITLES_LANGUAGES = []

    # initialize metadata_providers
    for cur_metadata_config, cur_metadata_class in [
            (self.srConfig.METADATA_KODI, kodi),
            (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
            (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
            (self.srConfig.METADATA_PS3, ps3),
            (self.srConfig.METADATA_WDTV, wdtv),
            (self.srConfig.METADATA_TIVO, tivo),
            (self.srConfig.METADATA_MEDE8ER, mede8er)]:
        tmp_provider = cur_metadata_class.metadata_class()
        tmp_provider.set_config(cur_metadata_config)
        self.metadataProviderDict[tmp_provider.name] = tmp_provider

    # add version checker job
    self.srScheduler.add_job(
        self.VERSIONUPDATER.run,
        srIntervalTrigger(**{'hours': self.srConfig.VERSION_UPDATER_FREQ,
                             'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
        name="VERSIONUPDATER",
        id="VERSIONUPDATER")

    # add network timezones updater job
    self.srScheduler.add_job(
        update_network_dict,
        srIntervalTrigger(**{'days': 1}),
        name="TZUPDATER",
        id="TZUPDATER")

    # add namecache updater job
    self.srScheduler.add_job(
        self.NAMECACHE.run,
        srIntervalTrigger(**{'minutes': self.srConfig.NAMECACHE_FREQ,
                             'min': self.srConfig.MIN_NAMECACHE_FREQ}),
        name="NAMECACHE",
        id="NAMECACHE")

    # add show updater job
    self.srScheduler.add_job(
        self.SHOWUPDATER.run,
        srIntervalTrigger(**{'hours': 1,
                             'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
        name="SHOWUPDATER",
        id="SHOWUPDATER")

    # add daily search job
    self.srScheduler.add_job(
        self.DAILYSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ}),
        name="DAILYSEARCHER",
        id="DAILYSEARCHER")

    # add backlog search job
    self.srScheduler.add_job(
        self.BACKLOGSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
        name="BACKLOG",
        id="BACKLOG")

    # add auto-postprocessing job
    self.srScheduler.add_job(
        self.AUTOPOSTPROCESSOR.run,
        srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                             'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
        name="POSTPROCESSOR",
        id="POSTPROCESSOR")

    # add find proper job
    self.srScheduler.add_job(
        self.PROPERSEARCHER.run,
        srIntervalTrigger(**{'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                         'daily': 24 * 60}[self.srConfig.PROPER_SEARCHER_INTERVAL]}),
        name="PROPERSEARCHER",
        id="PROPERSEARCHER")

    # add trakt.tv checker job
    self.srScheduler.add_job(
        self.TRAKTSEARCHER.run,
        srIntervalTrigger(**{'hours': 1}),
        name="TRAKTSEARCHER",
        id="TRAKTSEARCHER")

    # add subtitles finder job
    self.srScheduler.add_job(
        self.SUBTITLESEARCHER.run,
        srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
        name="SUBTITLESEARCHER",
        id="SUBTITLESEARCHER")

    # start scheduler service
    self.srScheduler.start()

    # Pause/Resume PROPERSEARCHER job
    (self.srScheduler.get_job('PROPERSEARCHER').pause,
     self.srScheduler.get_job('PROPERSEARCHER').resume)[self.srConfig.DOWNLOAD_PROPERS]()

    # Pause/Resume TRAKTSEARCHER job
    (self.srScheduler.get_job('TRAKTSEARCHER').pause,
     self.srScheduler.get_job('TRAKTSEARCHER').resume)[self.srConfig.USE_TRAKT]()

    # Pause/Resume SUBTITLESEARCHER job
    (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
     self.srScheduler.get_job('SUBTITLESEARCHER').resume)[self.srConfig.USE_SUBTITLES]()

    # Pause/Resume POSTPROCESSOR job
    (self.srScheduler.get_job('POSTPROCESSOR').pause,
     self.srScheduler.get_job('POSTPROCESSOR').resume)[self.srConfig.PROCESS_AUTOMATICALLY]()

    # start queues
    self.SEARCHQUEUE.start()
    self.SHOWQUEUE.start()

    # start webserver
    self.srWebServer.start()

    # start ioloop event handler
    IOLoop.current().start()
def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
            helpers.moveFile(
                os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                os.path.join(sickrage.DATA_DIR, '{}.bak-{}'.format(
                    'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))

        helpers.moveFile(
            os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')),
            os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

    # load config
    self.srConfig.load()

    # set socket timeout
    socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

    # setup logger settings
    self.srLogger.logSize = self.srConfig.LOG_SIZE
    self.srLogger.logNr = self.srConfig.LOG_NR
    self.srLogger.debugLogging = sickrage.DEBUG
    self.srLogger.consoleLogging = not sickrage.QUITE
    self.srLogger.logFile = self.srConfig.LOG_FILE

    # start logger
    self.srLogger.start()

    # initialize the main SB database
    main_db.MainDB().InitialSchema().upgrade()

    # initialize the cache database
    cache_db.CacheDB().InitialSchema().upgrade()

    # initialize the failed downloads database
    failed_db.FailedDB().InitialSchema().upgrade()

    # fix up any db problems
    main_db.MainDB().SanityCheck()

    # load data for shows from database
    self.load_shows()

    if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.srConfig.DEFAULT_PAGE = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection
    if self.srConfig.USE_ANIDB:
        try:
            self.ADBA_CONNECTION = adba.Connection(
                keepAlive=True,
                log=lambda msg: self.srLogger.debug("AniDB: %s " % msg)
            ).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
        except Exception as e:
            self.srLogger.warning("AniDB exception msg: %r " % repr(e))

    if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
        self.srConfig.WEB_PORT = 8081

    if not self.srConfig.WEB_COOKIE_SECRET:
        self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.srConfig.ANON_REDIRECT.endswith('?'):
        self.srConfig.ANON_REDIRECT = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
        self.srConfig.ROOT_DIRS = ''

    self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()

    if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.srConfig.NZB_METHOD = 'blackhole'

    if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                            'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.srConfig.TORRENT_METHOD = 'blackhole'

    if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
        self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

    # clamp scheduler frequencies to their configured minimums
    if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
        self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

    if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
        self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

    if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
        self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
    if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
        self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

    if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
        self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

    if self.srConfig.SHOWUPDATE_HOUR > 23 or self.srConfig.SHOWUPDATE_HOUR < 0:
        self.srConfig.SHOWUPDATE_HOUR = 0

    if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
        self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

    self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

    if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
        self.srConfig.SUBTITLES_LANGUAGES = []

    # initialize metadata_providers
    for cur_metadata_config, cur_metadata_class in [
            (self.srConfig.METADATA_KODI, kodi),
            (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
            (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
            (self.srConfig.METADATA_PS3, ps3),
            (self.srConfig.METADATA_WDTV, wdtv),
            (self.srConfig.METADATA_TIVO, tivo),
            (self.srConfig.METADATA_MEDE8ER, mede8er)]:
        tmp_provider = cur_metadata_class.metadata_class()
        tmp_provider.set_config(cur_metadata_config)
        self.metadataProviderDict[tmp_provider.name] = tmp_provider

    # add show queue job
    self.srScheduler.add_job(
        self.SHOWQUEUE.run,
        srIntervalTrigger(**{'seconds': 5}),
        name="SHOWQUEUE",
        id="SHOWQUEUE")

    # add search queue job
    self.srScheduler.add_job(
        self.SEARCHQUEUE.run,
        srIntervalTrigger(**{'seconds': 5}),
        name="SEARCHQUEUE",
        id="SEARCHQUEUE")

    # add version checker job
    self.srScheduler.add_job(
        self.VERSIONUPDATER.run,
        srIntervalTrigger(**{'hours': self.srConfig.VERSION_UPDATER_FREQ,
                             'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
        name="VERSIONUPDATER",
        id="VERSIONUPDATER")

    # add network timezones updater job
    self.srScheduler.add_job(
        update_network_dict,
        srIntervalTrigger(**{'days': 1}),
        name="TZUPDATER",
        id="TZUPDATER")

    # add namecache updater job
    self.srScheduler.add_job(
        self.NAMECACHE.run,
        srIntervalTrigger(**{'minutes': self.srConfig.NAMECACHE_FREQ,
                             'min': self.srConfig.MIN_NAMECACHE_FREQ}),
        name="NAMECACHE",
        id="NAMECACHE")

    # add show updater job
    self.srScheduler.add_job(
        self.SHOWUPDATER.run,
        srIntervalTrigger(**{'hours': 1,
                             'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
        name="SHOWUPDATER",
        id="SHOWUPDATER")

    # add daily search job
    self.srScheduler.add_job(
        self.DAILYSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ}),
        name="DAILYSEARCHER",
        id="DAILYSEARCHER")

    # add backlog search job
    self.srScheduler.add_job(
        self.BACKLOGSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
        name="BACKLOG",
        id="BACKLOG")

    # add auto-postprocessing job
    self.srScheduler.add_job(
        self.AUTOPOSTPROCESSOR.run,
        srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                             'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
        name="POSTPROCESSOR",
        id="POSTPROCESSOR")

    # add find proper job
    self.srScheduler.add_job(
        self.PROPERSEARCHER.run,
        srIntervalTrigger(**{'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                         'daily': 24 * 60}[self.srConfig.PROPER_SEARCHER_INTERVAL]}),
        name="PROPERSEARCHER",
        id="PROPERSEARCHER")

    # add trakt.tv checker job
    self.srScheduler.add_job(
        self.TRAKTSEARCHER.run,
        srIntervalTrigger(**{'hours': 1}),
        name="TRAKTSEARCHER",
        id="TRAKTSEARCHER")

    # add subtitles finder job
    self.srScheduler.add_job(
        self.SUBTITLESEARCHER.run,
        srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
        name="SUBTITLESEARCHER",
        id="SUBTITLESEARCHER")

    # start scheduler service
    self.srScheduler.start()

    # Pause/Resume PROPERSEARCHER job
    (self.srScheduler.get_job('PROPERSEARCHER').pause,
     self.srScheduler.get_job('PROPERSEARCHER').resume)[self.srConfig.DOWNLOAD_PROPERS]()

    # Pause/Resume TRAKTSEARCHER job
    (self.srScheduler.get_job('TRAKTSEARCHER').pause,
     self.srScheduler.get_job('TRAKTSEARCHER').resume)[self.srConfig.USE_TRAKT]()

    # Pause/Resume SUBTITLESEARCHER job
    (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
     self.srScheduler.get_job('SUBTITLESEARCHER').resume)[self.srConfig.USE_SUBTITLES]()

    # Pause/Resume POSTPROCESSOR job
    (self.srScheduler.get_job('POSTPROCESSOR').pause,
     self.srScheduler.get_job('POSTPROCESSOR').resume)[self.srConfig.PROCESS_AUTOMATICALLY]()

    # start webserver
    self.srWebServer.start()

    # start ioloop event handler
    IOLoop.instance().start()
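# A minimal sketch of the srIntervalTrigger factory used throughout start(),
# written as an assumption rather than SiCKRAGE's actual implementation: the
# call sites pass standard interval units plus a 'min' keyword that reads as a
# floor on the supplied interval, so this sketch clamps whichever unit was
# given and delegates to APScheduler's IntervalTrigger (assuming srScheduler
# is APScheduler-based). The name _interval_trigger_sketch is hypothetical.
def _interval_trigger_sketch(days=0, hours=0, minutes=0, seconds=0, min=None, start_date=None):
    from apscheduler.triggers.interval import IntervalTrigger

    # 'min' mirrors the keyword used at the call sites above; it intentionally
    # shadows the builtin, which this sketch never needs
    if min is not None:
        days = max(days, min) if days else days
        hours = max(hours, min) if hours else hours
        minutes = max(minutes, min) if minutes else minutes
        seconds = max(seconds, min) if seconds else seconds

    return IntervalTrigger(days=days, hours=hours, minutes=minutes,
                           seconds=seconds, start_date=start_date)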
def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
            helpers.moveFile(
                os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                os.path.join(sickrage.DATA_DIR, '{}.bak-{}'.format(
                    'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))

        helpers.moveFile(
            os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')),
            os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

    # load config
    self.srConfig.load()

    # set socket timeout
    socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

    # setup logger settings
    self.srLogger.logSize = self.srConfig.LOG_SIZE
    self.srLogger.logNr = self.srConfig.LOG_NR
    self.srLogger.logFile = self.srConfig.LOG_FILE
    self.srLogger.debugLogging = sickrage.DEBUG
    self.srLogger.consoleLogging = not sickrage.QUITE

    # start logger
    self.srLogger.start()

    # Check available space; refuse to start with less than 100MB free
    try:
        total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
        if available_space < 100:
            self.srLogger.error(
                "Shutting down as SiCKRAGE needs some space to work. You'll get corrupted data otherwise. Only %sMB left",
                available_space)
            sickrage.restart = False
            return
    except Exception:
        self.srLogger.error('Failed getting diskspace: %s', traceback.format_exc())

    # perform database startup actions
    for db in [self.mainDB, self.cacheDB, self.failedDB]:
        # initialize database
        db.initialize()

        # check integrity of database
        db.check_integrity()

        # migrate database
        db.migrate()

        # misc database cleanups
        db.cleanup()

    # compact main database at most once every 7 days
    if not sickrage.DEVELOPER and self.srConfig.LAST_DB_COMPACT < time.time() - 604800:  # 7 days
        self.mainDB.compact()
        self.srConfig.LAST_DB_COMPACT = int(time.time())

    # load data for shows from database
    self.load_shows()

    if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.srConfig.DEFAULT_PAGE = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(sickrage.CACHE_DIR, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection
    if self.srConfig.USE_ANIDB:
        try:
            self.ADBA_CONNECTION = adba.Connection(
                keepAlive=True,
                log=lambda msg: self.srLogger.debug("AniDB: %s " % msg)
            ).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
        except Exception as e:
            self.srLogger.warning("AniDB exception msg: %r " % repr(e))

    if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
        self.srConfig.WEB_PORT = 8081

    if not self.srConfig.WEB_COOKIE_SECRET:
        self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.srConfig.ANON_REDIRECT.endswith('?'):
        self.srConfig.ANON_REDIRECT = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
        self.srConfig.ROOT_DIRS = ''

    self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()

    if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.srConfig.NZB_METHOD = 'blackhole'

    if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                            'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.srConfig.TORRENT_METHOD = 'blackhole'

    if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
        self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

    # clamp scheduler frequencies to their configured minimums
    if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
        self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

    if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
        self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

    if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
        self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = self.BACKLOGSEARCHER.get_backlog_cycle_time()
    if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
        self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

    if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
        self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

    if self.srConfig.SHOWUPDATE_HOUR > 23 or self.srConfig.SHOWUPDATE_HOUR < 0:
        self.srConfig.SHOWUPDATE_HOUR = 0

    if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
        self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

    if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
        self.srConfig.SUBTITLES_LANGUAGES = []

    # add version checker job
    self.srScheduler.add_job(
        self.VERSIONUPDATER.run,
        srIntervalTrigger(**{'hours': self.srConfig.VERSION_UPDATER_FREQ,
                             'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
        name="VERSIONUPDATER",
        id="VERSIONUPDATER")

    # add network timezones updater job
    self.srScheduler.add_job(
        update_network_dict,
        srIntervalTrigger(**{'days': 1}),
        name="TZUPDATER",
        id="TZUPDATER")

    # add show updater job
    self.srScheduler.add_job(
        self.SHOWUPDATER.run,
        srIntervalTrigger(**{'days': 1,
                             'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
        name="SHOWUPDATER",
        id="SHOWUPDATER")

    # add show next episode job
    self.srScheduler.add_job(
        self.SHOWUPDATER.nextEpisode,
        srIntervalTrigger(**{'hours': 1}),
        name="SHOWNEXTEP",
        id="SHOWNEXTEP")

    # add daily search job
    self.srScheduler.add_job(
        self.DAILYSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ,
                             'start_date': datetime.datetime.now() + datetime.timedelta(minutes=4)}),
        name="DAILYSEARCHER",
        id="DAILYSEARCHER")

    # add backlog search job
    self.srScheduler.add_job(
        self.BACKLOGSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
                             'start_date': datetime.datetime.now() + datetime.timedelta(minutes=30)}),
        name="BACKLOG",
        id="BACKLOG")

    # add auto-postprocessing job
    self.srScheduler.add_job(
        self.AUTOPOSTPROCESSOR.run,
        srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                             'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
        name="POSTPROCESSOR",
        id="POSTPROCESSOR")

    # add find proper job
    self.srScheduler.add_job(
        self.PROPERSEARCHER.run,
        srIntervalTrigger(**{'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                         'daily': 24 * 60}[self.srConfig.PROPER_SEARCHER_INTERVAL]}),
        name="PROPERSEARCHER",
        id="PROPERSEARCHER")

    # add trakt.tv checker job
    self.srScheduler.add_job(
        self.TRAKTSEARCHER.run,
        srIntervalTrigger(**{'hours': 1}),
        name="TRAKTSEARCHER",
        id="TRAKTSEARCHER")

    # add subtitles finder job
    self.srScheduler.add_job(
        self.SUBTITLESEARCHER.run,
        srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
        name="SUBTITLESEARCHER",
        id="SUBTITLESEARCHER")

    # start scheduler service
    self.srScheduler.start()

    # Pause/Resume PROPERSEARCHER job
    (self.srScheduler.get_job('PROPERSEARCHER').pause,
     self.srScheduler.get_job('PROPERSEARCHER').resume)[self.srConfig.DOWNLOAD_PROPERS]()

    # Pause/Resume TRAKTSEARCHER job
    (self.srScheduler.get_job('TRAKTSEARCHER').pause,
     self.srScheduler.get_job('TRAKTSEARCHER').resume)[self.srConfig.USE_TRAKT]()

    # Pause/Resume SUBTITLESEARCHER job
    (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
     self.srScheduler.get_job('SUBTITLESEARCHER').resume)[self.srConfig.USE_SUBTITLES]()

    # Pause/Resume POSTPROCESSOR job
    (self.srScheduler.get_job('POSTPROCESSOR').pause,
     self.srScheduler.get_job('POSTPROCESSOR').resume)[self.srConfig.PROCESS_AUTOMATICALLY]()

    # start queues
    self.SEARCHQUEUE.start()
    self.SHOWQUEUE.start()

    # start webserver
    self.srWebServer.start()

    self.srLogger.info("SiCKRAGE :: STARTED")
    self.srLogger.info("SiCKRAGE :: VERSION:[{}]".format(self.VERSIONUPDATER.version))
    self.srLogger.info("SiCKRAGE :: CONFIG:[{}] [v{}]".format(sickrage.CONFIG_FILE, self.srConfig.CONFIG_VERSION))
    self.srLogger.info("SiCKRAGE :: URL:[{}://{}:{}/]".format(
        ('http', 'https')[self.srConfig.ENABLE_HTTPS],
        self.srConfig.WEB_HOST,
        self.srConfig.WEB_PORT))

    # launch browser window
    if all([not sickrage.NOLAUNCH, sickrage.srCore.srConfig.LAUNCH_BROWSER]):
        threading.Thread(
            None,
            lambda: launch_browser(
                ('http', 'https')[sickrage.srCore.srConfig.ENABLE_HTTPS],
                self.srConfig.WEB_HOST,
                sickrage.srCore.srConfig.WEB_PORT)).start()

    # start ioloop event handler
    self.io_loop.start()
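# The long runs of "if FREQ < MIN_FREQ: FREQ = MIN_FREQ" checks in each
# revision of start() above could be collapsed with max(); a small sketch,
# using a hypothetical helper name that is not part of SiCKRAGE:
def _clamp_to_min(value, minimum):
    """Return value, raised to minimum when it falls below it."""
    return max(value, minimum)

# usage, e.g.:
#     self.srConfig.DAILY_SEARCHER_FREQ = _clamp_to_min(
#         self.srConfig.DAILY_SEARCHER_FREQ, self.srConfig.MIN_DAILY_SEARCHER_FREQ)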