def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
            helpers.moveFile(os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                             os.path.join(sickrage.DATA_DIR, '{}.bak-{}'.format(
                                 'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
        helpers.moveFile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')),
                         os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

    # load config
    self.srConfig.load()

    # set socket timeout
    socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

    # setup logger settings
    self.srLogger.logSize = self.srConfig.LOG_SIZE
    self.srLogger.logNr = self.srConfig.LOG_NR
    self.srLogger.debugLogging = sickrage.DEBUG
    self.srLogger.consoleLogging = not sickrage.QUITE
    self.srLogger.logFile = self.srConfig.LOG_FILE

    # start logger
    self.srLogger.start()

    # initialize the main SB database
    main_db.MainDB().InitialSchema().upgrade()

    # initialize the cache database
    cache_db.CacheDB().InitialSchema().upgrade()

    # initialize the failed downloads database
    failed_db.FailedDB().InitialSchema().upgrade()

    # fix up any db problems
    main_db.MainDB().SanityCheck()

    # load data for shows from database
    self.load_shows()

    if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.srConfig.DEFAULT_PAGE = 'home'

    # cleanup cache folder (loop variable renamed so it no longer shadows the builtin dir())
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection, but only when AniDB support is enabled
    # (the guard was inverted here; later revisions use the positive check)
    if self.srConfig.USE_ANIDB:
        try:
            self.ADBA_CONNECTION = adba.Connection(
                keepAlive=True,
                log=lambda msg: self.srLogger.debug("AniDB: %s " % msg)
            ).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
        except Exception as e:
            self.srLogger.warning("AniDB exception msg: %r " % repr(e))

    if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
        self.srConfig.WEB_PORT = 8081

    if not self.srConfig.WEB_COOKIE_SECRET:
        self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.srConfig.ANON_REDIRECT.endswith('?'):
        self.srConfig.ANON_REDIRECT = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
        self.srConfig.ROOT_DIRS = ''

    self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()

    if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.srConfig.NZB_METHOD = 'blackhole'

    if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                            'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.srConfig.TORRENT_METHOD = 'blackhole'

    if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
        self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

    if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
        self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

    if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
        self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

    if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
        self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
    if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
        self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

    if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
        self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

    if self.srConfig.SHOWUPDATE_HOUR > 23:
        self.srConfig.SHOWUPDATE_HOUR = 0
    elif self.srConfig.SHOWUPDATE_HOUR < 0:
        self.srConfig.SHOWUPDATE_HOUR = 0

    if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
        self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

    self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

    if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
        self.srConfig.SUBTITLES_LANGUAGES = []

    # initialize metadata_providers
    for cur_metadata_tuple in [(self.srConfig.METADATA_KODI, kodi),
                               (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
                               (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
                               (self.srConfig.METADATA_PS3, ps3),
                               (self.srConfig.METADATA_WDTV, wdtv),
                               (self.srConfig.METADATA_TIVO, tivo),
                               (self.srConfig.METADATA_MEDE8ER, mede8er)]:
        (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
        tmp_provider = cur_metadata_class.metadata_class()
        tmp_provider.set_config(cur_metadata_config)
        self.metadataProviderDict[tmp_provider.name] = tmp_provider

    # add show queue job
    self.srScheduler.add_job(
        self.SHOWQUEUE.run,
        srIntervalTrigger(**{'seconds': 5}),
        name="SHOWQUEUE", id="SHOWQUEUE")

    # add search queue job
    self.srScheduler.add_job(
        self.SEARCHQUEUE.run,
        srIntervalTrigger(**{'seconds': 5}),
        name="SEARCHQUEUE", id="SEARCHQUEUE")

    # add version checker job
    self.srScheduler.add_job(
        self.VERSIONUPDATER.run,
        srIntervalTrigger(**{'hours': self.srConfig.VERSION_UPDATER_FREQ,
                             'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
        name="VERSIONUPDATER", id="VERSIONUPDATER")

    # add network timezones updater job
    self.srScheduler.add_job(
        update_network_dict,
        srIntervalTrigger(**{'days': 1}),
        name="TZUPDATER", id="TZUPDATER")

    # add namecache updater job
    self.srScheduler.add_job(
        self.NAMECACHE.run,
        srIntervalTrigger(**{'minutes': self.srConfig.NAMECACHE_FREQ,
                             'min': self.srConfig.MIN_NAMECACHE_FREQ}),
        name="NAMECACHE", id="NAMECACHE")

    # add show updater job
    self.srScheduler.add_job(
        self.SHOWUPDATER.run,
        srIntervalTrigger(**{'hours': 1,
                             'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
        name="SHOWUPDATER", id="SHOWUPDATER")

    # add daily search job
    self.srScheduler.add_job(
        self.DAILYSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ}),
        name="DAILYSEARCHER", id="DAILYSEARCHER")

    # add backlog search job
    self.srScheduler.add_job(
        self.BACKLOGSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
        name="BACKLOG", id="BACKLOG")

    # add auto-postprocessing job
    self.srScheduler.add_job(
        self.AUTOPOSTPROCESSOR.run,
        srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                             'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
        name="POSTPROCESSOR", id="POSTPROCESSOR")

    # add find proper job
    self.srScheduler.add_job(
        self.PROPERSEARCHER.run,
        srIntervalTrigger(**{'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                         'daily': 24 * 60}[self.srConfig.PROPER_SEARCHER_INTERVAL]}),
        name="PROPERSEARCHER", id="PROPERSEARCHER")

    # add trakt.tv checker job
    self.srScheduler.add_job(
        self.TRAKTSEARCHER.run,
        srIntervalTrigger(**{'hours': 1}),
        name="TRAKTSEARCHER", id="TRAKTSEARCHER")

    # add subtitles finder job
    self.srScheduler.add_job(
        self.SUBTITLESEARCHER.run,
        srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
        name="SUBTITLESEARCHER", id="SUBTITLESEARCHER")

    # start scheduler service
    self.srScheduler.start()

    # Pause/Resume PROPERSEARCHER job
    (self.srScheduler.get_job('PROPERSEARCHER').pause,
     self.srScheduler.get_job('PROPERSEARCHER').resume)[self.srConfig.DOWNLOAD_PROPERS]()

    # Pause/Resume TRAKTSEARCHER job
    (self.srScheduler.get_job('TRAKTSEARCHER').pause,
     self.srScheduler.get_job('TRAKTSEARCHER').resume)[self.srConfig.USE_TRAKT]()

    # Pause/Resume SUBTITLESEARCHER job
    (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
     self.srScheduler.get_job('SUBTITLESEARCHER').resume)[self.srConfig.USE_SUBTITLES]()

    # Pause/Resume POSTPROCESS job
    (self.srScheduler.get_job('POSTPROCESSOR').pause,
     self.srScheduler.get_job('POSTPROCESSOR').resume)[self.srConfig.PROCESS_AUTOMATICALLY]()

    # start webserver
    self.srWebServer.start()

    # start ioloop event handler
    IOLoop.instance().start()
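# --- illustrative sketch (not part of the original sources) -----------------
# The (job.pause, job.resume)[flag]() calls above rely on bool being an int
# subclass: False selects index 0 (pause), True selects index 1 (resume).
# DummyJob is a hypothetical stand-in for an APScheduler job object.
class DummyJob(object):
    def pause(self):
        print("paused")

    def resume(self):
        print("resumed")


_job = DummyJob()
(_job.pause, _job.resume)[True]()   # prints "resumed"
(_job.pause, _job.resume)[False]()  # prints "paused"
_job.resume() if True else _job.pause()  # the explicit equivalent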
def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # patch modules with encoding kludge
    patch_modules()

    # init core classes
    self.notifier_providers = NotifierProviders()
    self.metadata_providers = MetadataProviders()
    self.search_providers = SearchProviders()
    self.log = Logger()
    self.config = Config()
    self.api = API()
    self.alerts = Notifications()
    self.main_db = MainDB()
    self.cache_db = CacheDB()
    self.failed_db = FailedDB()
    self.scheduler = TornadoScheduler()
    self.wserver = WebServer()
    self.google_auth = GoogleAuth()
    self.name_cache = NameCache()
    self.show_queue = ShowQueue()
    self.search_queue = SearchQueue()
    self.postprocessor_queue = PostProcessorQueue()
    self.version_updater = VersionUpdater()
    self.show_updater = ShowUpdater()
    self.daily_searcher = DailySearcher()
    self.failed_snatch_searcher = FailedSnatchSearcher()
    self.backlog_searcher = BacklogSearcher()
    self.proper_searcher = ProperSearcher()
    self.trakt_searcher = TraktSearcher()
    self.subtitle_searcher = SubtitleSearcher()
    self.auto_postprocessor = AutoPostProcessor()

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
            helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                             os.path.join(self.data_dir, '{}.bak-{}'.format(
                                 'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
        helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                         os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

    # load config
    self.config.load()

    # set language
    self.config.change_gui_lang(self.config.gui_lang)

    # set socket timeout
    socket.setdefaulttimeout(self.config.socket_timeout)

    # setup logger settings
    self.log.logSize = self.config.log_size
    self.log.logNr = self.config.log_nr
    self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
    self.log.debugLogging = self.config.debug
    self.log.consoleLogging = not self.quite

    # start logger
    self.log.start()

    # user agent
    if self.config.random_user_agent:
        self.user_agent = UserAgent().random

    urlparse.uses_netloc.append('scgi')
    urllib.FancyURLopener.version = self.user_agent

    # Check available space
    try:
        total_space, available_space = getFreeSpace(self.data_dir)
        if available_space < 100:
            self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                           'otherwise. Only %sMB left', available_space)
            return
    except Exception:
        self.log.error('Failed getting diskspace: %s', traceback.format_exc())

    # perform database startup actions
    for db in [self.main_db, self.cache_db, self.failed_db]:
        # initialize database
        db.initialize()

        # check integrity of database
        db.check_integrity()

        # migrate database
        db.migrate()

        # misc database cleanups
        db.cleanup()

    # compact main database
    if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
        self.main_db.compact()
        self.config.last_db_compact = int(time.time())

    # load name cache
    self.name_cache.load()

    # load data for shows from database
    self.load_shows()

    if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.config.default_page = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection
    if self.config.use_anidb:
        def anidb_logger(msg):
            return self.log.debug("AniDB: {} ".format(msg))

        try:
            self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
            self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
        except Exception as e:
            self.log.warning("AniDB exception msg: %r " % repr(e))

    if self.config.web_port < 21 or self.config.web_port > 65535:
        self.config.web_port = 8081

    if not self.config.web_cookie_secret:
        self.config.web_cookie_secret = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.config.anon_redirect.endswith('?'):
        self.config.anon_redirect = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
        self.config.root_dirs = ''

    self.config.naming_force_folders = check_force_season_folders()

    if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.config.nzb_method = 'blackhole'

    if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                          'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.config.torrent_method = 'blackhole'

    if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
        self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

    if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
        self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

    self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
    if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
        self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

    if self.config.version_updater_freq < self.config.min_version_updater_freq:
        self.config.version_updater_freq = self.config.min_version_updater_freq

    if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
        self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

    if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
        self.config.failed_snatch_age = self.config.min_failed_snatch_age

    if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
        self.config.proper_searcher_interval = 'daily'

    if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
        self.config.showupdate_hour = 0

    if self.config.subtitles_languages[0] == '':
        self.config.subtitles_languages = []

    # add version checker job
    self.scheduler.add_job(
        self.version_updater.run,
        IntervalTrigger(hours=self.config.version_updater_freq),
        name=self.version_updater.name, id=self.version_updater.name)

    # add network timezones updater job
    self.scheduler.add_job(
        update_network_dict,
        IntervalTrigger(days=1),
        name="TZUPDATER", id="TZUPDATER")

    # add show updater job
    self.scheduler.add_job(
        self.show_updater.run,
        IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)),
        name=self.show_updater.name, id=self.show_updater.name)

    # add daily search job
    self.scheduler.add_job(
        self.daily_searcher.run,
        IntervalTrigger(minutes=self.config.daily_searcher_freq,
                        start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
        name=self.daily_searcher.name, id=self.daily_searcher.name)

    # add failed snatch search job
    self.scheduler.add_job(
        self.failed_snatch_searcher.run,
        IntervalTrigger(hours=1, start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
        name=self.failed_snatch_searcher.name, id=self.failed_snatch_searcher.name)

    # add backlog search job
    self.scheduler.add_job(
        self.backlog_searcher.run,
        IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                        start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
        name=self.backlog_searcher.name, id=self.backlog_searcher.name)

    # add auto-postprocessing job
    self.scheduler.add_job(
        self.auto_postprocessor.run,
        IntervalTrigger(minutes=self.config.autopostprocessor_freq),
        name=self.auto_postprocessor.name, id=self.auto_postprocessor.name)

    # add find proper job
    self.scheduler.add_job(
        self.proper_searcher.run,
        IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                 'daily': 24 * 60}[self.config.proper_searcher_interval]),
        name=self.proper_searcher.name, id=self.proper_searcher.name)

    # add trakt.tv checker job
    self.scheduler.add_job(
        self.trakt_searcher.run,
        IntervalTrigger(hours=1),
        name=self.trakt_searcher.name, id=self.trakt_searcher.name)

    # add subtitles finder job
    self.scheduler.add_job(
        self.subtitle_searcher.run,
        IntervalTrigger(hours=self.config.subtitle_searcher_freq),
        name=self.subtitle_searcher.name, id=self.subtitle_searcher.name)

    # start scheduler service
    self.scheduler.start()

    # start queues
    self.search_queue.start()
    self.show_queue.start()
    self.postprocessor_queue.start()

    # start webserver
    self.wserver.start()

    # start ioloop
    self.io_loop.start()
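# --- illustrative sketch (assumed names, not from the original sources) -----
# The compaction guard above runs a task at most once per 7 days by storing
# the last run as a unix timestamp and comparing it against now minus the
# period; this is the same pattern reduced to a standalone helper.
import time

COMPACT_PERIOD = 604800  # 7 days in seconds
_last_db_compact = 0     # stand-in for the persisted config value


def maybe_compact(compact):
    global _last_db_compact
    if _last_db_compact < time.time() - COMPACT_PERIOD:
        compact()
        _last_db_compact = int(time.time())


def _compact():
    print("compacting main database")


maybe_compact(_compact)  # runs: stored timestamp is older than the period
maybe_compact(_compact)  # skipped: ran too recently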
def initialize():
    if not sickrage.INITIALIZED:
        with threading.Lock():
            # init encoding
            encodingInit()

            # Check if we need to perform a restore first
            os.chdir(sickrage.DATA_DIR)
            restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
            if os.path.exists(restore_dir):
                success = restoreDB(restore_dir, sickrage.DATA_DIR)
                sickrage.LOGGER.info("Restore: restoring DB and config.ini %s!\n" %
                                     ("FAILED", "SUCCESSFUL")[success])

            # init indexerApi
            sickrage.INDEXER_API = indexerApi

            # initialize notifiers
            sickrage.NOTIFIERS = AttrDict(
                libnotify=LibnotifyNotifier(),
                kodi_notifier=KODINotifier(),
                plex_notifier=PLEXNotifier(),
                emby_notifier=EMBYNotifier(),
                nmj_notifier=NMJNotifier(),
                nmjv2_notifier=NMJv2Notifier(),
                synoindex_notifier=synoIndexNotifier(),
                synology_notifier=synologyNotifier(),
                pytivo_notifier=pyTivoNotifier(),
                growl_notifier=GrowlNotifier(),
                prowl_notifier=ProwlNotifier(),
                libnotify_notifier=LibnotifyNotifier(),
                pushover_notifier=PushoverNotifier(),
                boxcar_notifier=BoxcarNotifier(),
                boxcar2_notifier=Boxcar2Notifier(),
                nma_notifier=NMA_Notifier(),
                pushalot_notifier=PushalotNotifier(),
                pushbullet_notifier=PushbulletNotifier(),
                freemobile_notifier=FreeMobileNotifier(),
                twitter_notifier=TwitterNotifier(),
                trakt_notifier=TraktNotifier(),
                email_notifier=EmailNotifier(),
            )

            sickrage.NAMING_EP_TYPE = ("%(seasonnumber)dx%(episodenumber)02d",
                                       "s%(seasonnumber)02de%(episodenumber)02d",
                                       "S%(seasonnumber)02dE%(episodenumber)02d",
                                       "%(seasonnumber)02dx%(episodenumber)02d")
            sickrage.SPORTS_EP_TYPE = ("%(seasonnumber)dx%(episodenumber)02d",
                                       "s%(seasonnumber)02de%(episodenumber)02d",
                                       "S%(seasonnumber)02dE%(episodenumber)02d",
                                       "%(seasonnumber)02dx%(episodenumber)02d")
            sickrage.NAMING_EP_TYPE_TEXT = ("1x02", "s01e02", "S01E02", "01x02")
            sickrage.NAMING_MULTI_EP_TYPE = {0: ["-%(episodenumber)02d"] * len(sickrage.NAMING_EP_TYPE),
                                             1: [" - " + x for x in sickrage.NAMING_EP_TYPE],
                                             2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
            sickrage.NAMING_MULTI_EP_TYPE_TEXT = ("extend", "duplicate", "repeat")
            sickrage.NAMING_SEP_TYPE = (" - ", " ")
            sickrage.NAMING_SEP_TYPE_TEXT = (" - ", "space")

            # migrate old database filenames to new ones
            if not os.path.exists(main_db.MainDB().filename) and os.path.exists("sickbeard.db"):
                helpers.moveFile("sickbeard.db", main_db.MainDB().filename)

            # init config file
            srConfig.load_config(sickrage.CONFIG_FILE, True)

            # set socket timeout
            socket.setdefaulttimeout(sickrage.SOCKET_TIMEOUT)

            # init logger
            sickrage.LOGGER = sickrage.LOGGER.__class__(logFile=sickrage.LOG_FILE,
                                                        logSize=sickrage.LOG_SIZE,
                                                        logNr=sickrage.LOG_NR,
                                                        fileLogging=makeDir(sickrage.LOG_DIR),
                                                        debugLogging=sickrage.DEBUG)

            # init updater and get current version
            sickrage.VERSIONUPDATER = VersionUpdater()
            sickrage.VERSION = sickrage.VERSIONUPDATER.updater.get_cur_version

            # initialize the main SB database
            main_db.MainDB().InitialSchema().upgrade()

            # initialize the cache database
            cache_db.CacheDB().InitialSchema().upgrade()

            # initialize the failed downloads database
            failed_db.FailedDB().InitialSchema().upgrade()

            # fix up any db problems
            main_db.MainDB().SanityCheck()

            if sickrage.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
                sickrage.DEFAULT_PAGE = 'home'

            if not makeDir(sickrage.CACHE_DIR):
                sickrage.LOGGER.error("!!! Creating local cache dir failed")
                sickrage.CACHE_DIR = None

            # Check if we need to perform a restore of the cache folder
            try:
                restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
                if os.path.exists(restore_dir) and os.path.exists(os.path.join(restore_dir, 'cache')):
                    def restore_cache(srcdir, dstdir):
                        def path_leaf(path):
                            head, tail = os.path.split(path)
                            return tail or os.path.basename(head)

                        try:
                            if os.path.isdir(dstdir):
                                # fixed: mixed auto/manual format fields and the
                                # nonexistent datetime.date.now() call
                                bakfilename = '{0}-{1}'.format(path_leaf(dstdir),
                                                               datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
                                shutil.move(dstdir, os.path.join(os.path.dirname(dstdir), bakfilename))

                            shutil.move(srcdir, dstdir)
                            sickrage.LOGGER.info("Restore: restoring cache successful")
                        except Exception as E:
                            sickrage.LOGGER.error("Restore: restoring cache failed: {}".format(E))

                    restore_cache(os.path.join(restore_dir, 'cache'), sickrage.CACHE_DIR)
            except Exception as e:
                sickrage.LOGGER.error("Restore: restoring cache failed: {}".format(e))
            finally:
                if os.path.exists(os.path.join(sickrage.DATA_DIR, 'restore')):
                    try:
                        removetree(os.path.join(sickrage.DATA_DIR, 'restore'))
                    except Exception as e:
                        sickrage.LOGGER.error("Restore: Unable to remove the restore directory: {}".format(e))

                for cleanupDir in ['mako', 'sessions', 'indexers']:
                    try:
                        removetree(os.path.join(sickrage.CACHE_DIR, cleanupDir))
                    except Exception as e:
                        sickrage.LOGGER.warning(
                            "Restore: Unable to remove the cache/{0} directory: {1}".format(cleanupDir, e))

            if sickrage.WEB_PORT < 21 or sickrage.WEB_PORT > 65535:
                sickrage.WEB_PORT = 8081

            if not sickrage.WEB_COOKIE_SECRET:
                sickrage.WEB_COOKIE_SECRET = generateCookieSecret()

            # attempt to help prevent users from breaking links by using a bad url
            if not sickrage.ANON_REDIRECT.endswith('?'):
                sickrage.ANON_REDIRECT = ''

            if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', sickrage.ROOT_DIRS):
                sickrage.ROOT_DIRS = ''

            sickrage.NAMING_FORCE_FOLDERS = check_force_season_folders()

            if sickrage.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
                sickrage.NZB_METHOD = 'blackhole'

            if not sickrage.PROVIDER_ORDER:
                sickrage.PROVIDER_ORDER = sickrage.providersDict[GenericProvider.NZB].keys() + \
                                          sickrage.providersDict[GenericProvider.TORRENT].keys()

            if sickrage.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                               'download_station', 'rtorrent', 'qbittorrent', 'mlnet'):
                sickrage.TORRENT_METHOD = 'blackhole'

            if sickrage.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
                sickrage.PROPER_SEARCHER_INTERVAL = 'daily'

            if sickrage.AUTOPOSTPROCESSOR_FREQ < sickrage.MIN_AUTOPOSTPROCESSOR_FREQ:
                sickrage.AUTOPOSTPROCESSOR_FREQ = sickrage.MIN_AUTOPOSTPROCESSOR_FREQ

            if sickrage.NAMECACHE_FREQ < sickrage.MIN_NAMECACHE_FREQ:
                sickrage.NAMECACHE_FREQ = sickrage.MIN_NAMECACHE_FREQ

            if sickrage.DAILY_SEARCHER_FREQ < sickrage.MIN_DAILY_SEARCHER_FREQ:
                sickrage.DAILY_SEARCHER_FREQ = sickrage.MIN_DAILY_SEARCHER_FREQ

            sickrage.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
            if sickrage.BACKLOG_SEARCHER_FREQ < sickrage.MIN_BACKLOG_SEARCHER_FREQ:
                sickrage.BACKLOG_SEARCHER_FREQ = sickrage.MIN_BACKLOG_SEARCHER_FREQ

            if sickrage.VERSION_UPDATER_FREQ < sickrage.MIN_VERSION_UPDATER_FREQ:
                sickrage.VERSION_UPDATER_FREQ = sickrage.MIN_VERSION_UPDATER_FREQ

            if sickrage.SHOWUPDATE_HOUR > 23:
                sickrage.SHOWUPDATE_HOUR = 0
            elif sickrage.SHOWUPDATE_HOUR < 0:
                sickrage.SHOWUPDATE_HOUR = 0

            if sickrage.SUBTITLE_SEARCHER_FREQ < sickrage.MIN_SUBTITLE_SEARCHER_FREQ:
                sickrage.SUBTITLE_SEARCHER_FREQ = sickrage.MIN_SUBTITLE_SEARCHER_FREQ

            sickrage.NEWS_LATEST = sickrage.NEWS_LAST_READ

            if sickrage.SUBTITLES_LANGUAGES[0] == '':
                sickrage.SUBTITLES_LANGUAGES = []

            sickrage.TIME_PRESET = sickrage.TIME_PRESET_W_SECONDS.replace(":%S", "")

            # initialize metadata_providers
            sickrage.metadataProvideDict = get_metadata_generator_dict()
            for cur_metadata_tuple in [(sickrage.METADATA_KODI, kodi),
                                       (sickrage.METADATA_KODI_12PLUS, kodi_12plus),
                                       (sickrage.METADATA_MEDIABROWSER, mediabrowser),
                                       (sickrage.METADATA_PS3, ps3),
                                       (sickrage.METADATA_WDTV, wdtv),
                                       (sickrage.METADATA_TIVO, tivo),
                                       (sickrage.METADATA_MEDE8ER, mede8er)]:
                (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
                tmp_provider = cur_metadata_class.metadata_class()
                tmp_provider.set_config(cur_metadata_config)
                sickrage.metadataProvideDict[tmp_provider.name] = tmp_provider

            # init caches
            sickrage.NAMECACHE = nameCache()

            # init queues
            sickrage.SHOWUPDATER = ShowUpdater()
            sickrage.SHOWQUEUE = ShowQueue()
            sickrage.SEARCHQUEUE = SearchQueue()

            # load data for shows from database
            sickrage.showList = load_shows()

            # init searchers
            sickrage.DAILYSEARCHER = DailySearcher()
            sickrage.BACKLOGSEARCHER = BacklogSearcher()
            sickrage.PROPERSEARCHER = ProperSearcher()
            sickrage.TRAKTSEARCHER = TraktSearcher()
            sickrage.SUBTITLESEARCHER = SubtitleSearcher()

            # init scheduler
            sickrage.Scheduler = Scheduler()

            # add version checker job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.VERSIONUPDATER.run,
                SRIntervalTrigger(**{'hours': sickrage.VERSION_UPDATER_FREQ,
                                     'min': sickrage.MIN_VERSION_UPDATER_FREQ}),
                name="VERSIONUPDATER", id="VERSIONUPDATER", replace_existing=True)

            # add network timezones updater job to scheduler
            sickrage.Scheduler.add_job(
                update_network_dict,
                SRIntervalTrigger(**{'days': 1}),
                name="TZUPDATER", id="TZUPDATER", replace_existing=True)

            # add namecache updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.NAMECACHE.run,
                SRIntervalTrigger(**{'minutes': sickrage.NAMECACHE_FREQ,
                                     'min': sickrage.MIN_NAMECACHE_FREQ}),
                name="NAMECACHE", id="NAMECACHE", replace_existing=True)

            # add show queue job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SHOWQUEUE.run,
                SRIntervalTrigger(**{'seconds': 3}),
                name="SHOWQUEUE", id="SHOWQUEUE", replace_existing=True)

            # add search queue job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SEARCHQUEUE.run,
                SRIntervalTrigger(**{'seconds': 1}),
                name="SEARCHQUEUE", id="SEARCHQUEUE", replace_existing=True)

            # add show updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SHOWUPDATER.run,
                SRIntervalTrigger(**{'hours': 1,
                                     'start_date': datetime.datetime.now().replace(hour=sickrage.SHOWUPDATE_HOUR)}),
                name="SHOWUPDATER", id="SHOWUPDATER", replace_existing=True)

            # add daily search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.DAILYSEARCHER.run,
                SRIntervalTrigger(**{'minutes': sickrage.DAILY_SEARCHER_FREQ,
                                     'min': sickrage.MIN_DAILY_SEARCHER_FREQ}),
                name="DAILYSEARCHER", id="DAILYSEARCHER", replace_existing=True)

            # add backlog search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.BACKLOGSEARCHER.run,
                SRIntervalTrigger(**{'minutes': sickrage.BACKLOG_SEARCHER_FREQ,
                                     'min': sickrage.MIN_BACKLOG_SEARCHER_FREQ}),
                name="BACKLOG", id="BACKLOG", replace_existing=True)

            # add auto-postprocessing job to scheduler
            job = sickrage.Scheduler.add_job(
                auto_postprocessor.PostProcessor().run,
                SRIntervalTrigger(**{'minutes': sickrage.AUTOPOSTPROCESSOR_FREQ,
                                     'min': sickrage.MIN_AUTOPOSTPROCESSOR_FREQ}),
                name="POSTPROCESSOR", id="POSTPROCESSOR", replace_existing=True)
            (job.pause, job.resume)[sickrage.PROCESS_AUTOMATICALLY]()

            # add find propers job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.PROPERSEARCHER.run,
                SRIntervalTrigger(**{'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                                 'daily': 24 * 60}[sickrage.PROPER_SEARCHER_INTERVAL]}),
                name="PROPERSEARCHER", id="PROPERSEARCHER", replace_existing=True)
            (job.pause, job.resume)[sickrage.DOWNLOAD_PROPERS]()

            # add trakt.tv checker job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.TRAKTSEARCHER.run,
                SRIntervalTrigger(**{'hours': 1}),
                name="TRAKTSEARCHER", id="TRAKTSEARCHER", replace_existing=True)
            (job.pause, job.resume)[sickrage.USE_TRAKT]()

            # add subtitles finder job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.SUBTITLESEARCHER.run,
                SRIntervalTrigger(**{'hours': sickrage.SUBTITLE_SEARCHER_FREQ}),
                name="SUBTITLESEARCHER", id="SUBTITLESEARCHER", replace_existing=True)
            (job.pause, job.resume)[sickrage.USE_SUBTITLES]()

            # initialize web server
            sickrage.WEB_SERVER = SRWebServer(**{
                'port': int(sickrage.WEB_PORT),
                'host': sickrage.WEB_HOST,
                'data_root': sickrage.DATA_DIR,
                'gui_root': sickrage.GUI_DIR,
                'web_root': sickrage.WEB_ROOT,
                'log_dir': sickrage.WEB_LOG or sickrage.LOG_DIR,
                'username': sickrage.WEB_USERNAME,
                'password': sickrage.WEB_PASSWORD,
                'enable_https': sickrage.ENABLE_HTTPS,
                'handle_reverse_proxy': sickrage.HANDLE_REVERSE_PROXY,
                'https_cert': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_CERT),
                'https_key': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_KEY),
                'daemonize': sickrage.DAEMONIZE,
                'pidfile': sickrage.PIDFILE,
                'stop_timeout': 3,
                'nolaunch': sickrage.WEB_NOLAUNCH
            })

            sickrage.LOGGER.info("SiCKRAGE VERSION:[{}] CONFIG:[{}]".format(sickrage.VERSION, sickrage.CONFIG_FILE))

            sickrage.INITIALIZED = True

    return True
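# --- illustrative sketch (assumption, not the project's implementation) -----
# SRIntervalTrigger is project-specific; judging by the calls above, the 'min'
# key appears to act as a floor on the interval units. A hypothetical
# equivalent built on APScheduler's stock IntervalTrigger:
from apscheduler.triggers.interval import IntervalTrigger

_UNITS = ('weeks', 'days', 'hours', 'minutes', 'seconds')


def sr_interval_trigger(min=None, **kwargs):  # 'min' mirrors the key used above
    # clamp each supplied interval unit to the floor, so a job can never be
    # scheduled more frequently than the configured minimum
    if min is not None:
        for unit in _UNITS:
            if unit in kwargs:
                kwargs[unit] = max(kwargs[unit], min)
    return IntervalTrigger(**kwargs)


trigger = sr_interval_trigger(**{'hours': 1, 'min': 1})  # interval clamped to >= 1 hour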
def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
            helpers.moveFile(os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                             os.path.join(sickrage.DATA_DIR, '{}.bak-{}'.format(
                                 'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
        helpers.moveFile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')),
                         os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

    # load config
    self.srConfig.load()

    # set socket timeout
    socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

    # setup logger settings
    self.srLogger.logSize = self.srConfig.LOG_SIZE
    self.srLogger.logNr = self.srConfig.LOG_NR
    self.srLogger.debugLogging = sickrage.DEBUG
    self.srLogger.consoleLogging = not sickrage.QUITE
    self.srLogger.logFile = self.srConfig.LOG_FILE

    # start logger
    self.srLogger.start()

    # Check available space
    try:
        total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
        if available_space < 100:
            self.srLogger.error('Shutting down as SiCKRAGE needs some space to work. '
                                'You\'ll get corrupted data otherwise. Only %sMB left', available_space)
            sickrage.restart = False
            return
    except Exception:
        self.srLogger.error('Failed getting diskspace: %s', traceback.format_exc())

    # perform database startup actions
    for db in [MainDB, CacheDB, FailedDB]:
        # initialize the database
        db().initialize()

        # migrate the database
        db().migrate()

        # compact the main database
        db().compact()

    # load data for shows from database
    self.load_shows()

    # build name cache
    self.NAMECACHE.build()

    if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.srConfig.DEFAULT_PAGE = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection, but only when AniDB support is enabled
    # (the guard was inverted here; later revisions use the positive check)
    if self.srConfig.USE_ANIDB:
        try:
            self.ADBA_CONNECTION = adba.Connection(
                keepAlive=True,
                log=lambda msg: self.srLogger.debug("AniDB: %s " % msg)
            ).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
        except Exception as e:
            self.srLogger.warning("AniDB exception msg: %r " % repr(e))

    if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
        self.srConfig.WEB_PORT = 8081

    if not self.srConfig.WEB_COOKIE_SECRET:
        self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.srConfig.ANON_REDIRECT.endswith('?'):
        self.srConfig.ANON_REDIRECT = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
        self.srConfig.ROOT_DIRS = ''

    self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()

    if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.srConfig.NZB_METHOD = 'blackhole'

    if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                            'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.srConfig.TORRENT_METHOD = 'blackhole'

    if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
        self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

    if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
        self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

    if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
        self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

    if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
        self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
    if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
        self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

    if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
        self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

    if self.srConfig.SHOWUPDATE_HOUR > 23:
        self.srConfig.SHOWUPDATE_HOUR = 0
    elif self.srConfig.SHOWUPDATE_HOUR < 0:
        self.srConfig.SHOWUPDATE_HOUR = 0

    if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
        self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

    self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

    if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
        self.srConfig.SUBTITLES_LANGUAGES = []

    # initialize metadata_providers
    for cur_metadata_tuple in [(self.srConfig.METADATA_KODI, kodi),
                               (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
                               (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
                               (self.srConfig.METADATA_PS3, ps3),
                               (self.srConfig.METADATA_WDTV, wdtv),
                               (self.srConfig.METADATA_TIVO, tivo),
                               (self.srConfig.METADATA_MEDE8ER, mede8er)]:
        (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
        tmp_provider = cur_metadata_class.metadata_class()
        tmp_provider.set_config(cur_metadata_config)
        self.metadataProviderDict[tmp_provider.name] = tmp_provider

    # add version checker job
    self.srScheduler.add_job(
        self.VERSIONUPDATER.run,
        srIntervalTrigger(**{'hours': self.srConfig.VERSION_UPDATER_FREQ,
                             'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
        name="VERSIONUPDATER", id="VERSIONUPDATER")

    # add network timezones updater job
    self.srScheduler.add_job(
        update_network_dict,
        srIntervalTrigger(**{'days': 1}),
        name="TZUPDATER", id="TZUPDATER")

    # add namecache updater job
    self.srScheduler.add_job(
        self.NAMECACHE.run,
        srIntervalTrigger(**{'minutes': self.srConfig.NAMECACHE_FREQ,
                             'min': self.srConfig.MIN_NAMECACHE_FREQ}),
        name="NAMECACHE", id="NAMECACHE")

    # add show updater job
    self.srScheduler.add_job(
        self.SHOWUPDATER.run,
        srIntervalTrigger(**{'hours': 1,
                             'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
        name="SHOWUPDATER", id="SHOWUPDATER")

    # add daily search job
    self.srScheduler.add_job(
        self.DAILYSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ}),
        name="DAILYSEARCHER", id="DAILYSEARCHER")

    # add backlog search job
    self.srScheduler.add_job(
        self.BACKLOGSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
        name="BACKLOG", id="BACKLOG")

    # add auto-postprocessing job
    self.srScheduler.add_job(
        self.AUTOPOSTPROCESSOR.run,
        srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                             'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
        name="POSTPROCESSOR", id="POSTPROCESSOR")

    # add find proper job
    self.srScheduler.add_job(
        self.PROPERSEARCHER.run,
        srIntervalTrigger(**{'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                         'daily': 24 * 60}[self.srConfig.PROPER_SEARCHER_INTERVAL]}),
        name="PROPERSEARCHER", id="PROPERSEARCHER")

    # add trakt.tv checker job
    self.srScheduler.add_job(
        self.TRAKTSEARCHER.run,
        srIntervalTrigger(**{'hours': 1}),
        name="TRAKTSEARCHER", id="TRAKTSEARCHER")

    # add subtitles finder job
    self.srScheduler.add_job(
        self.SUBTITLESEARCHER.run,
        srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
        name="SUBTITLESEARCHER", id="SUBTITLESEARCHER")

    # start scheduler service
    self.srScheduler.start()

    # Pause/Resume PROPERSEARCHER job
    (self.srScheduler.get_job('PROPERSEARCHER').pause,
     self.srScheduler.get_job('PROPERSEARCHER').resume)[self.srConfig.DOWNLOAD_PROPERS]()

    # Pause/Resume TRAKTSEARCHER job
    (self.srScheduler.get_job('TRAKTSEARCHER').pause,
     self.srScheduler.get_job('TRAKTSEARCHER').resume)[self.srConfig.USE_TRAKT]()

    # Pause/Resume SUBTITLESEARCHER job
    (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
     self.srScheduler.get_job('SUBTITLESEARCHER').resume)[self.srConfig.USE_SUBTITLES]()

    # Pause/Resume POSTPROCESS job
    (self.srScheduler.get_job('POSTPROCESSOR').pause,
     self.srScheduler.get_job('POSTPROCESSOR').resume)[self.srConfig.PROCESS_AUTOMATICALLY]()

    # start queues
    self.SEARCHQUEUE.start()
    self.SHOWQUEUE.start()

    # start webserver
    self.srWebServer.start()

    # start ioloop event handler
    IOLoop.current().start()
def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
            helpers.moveFile(os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                             os.path.join(sickrage.DATA_DIR, '{}.bak-{}'.format(
                                 'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
        helpers.moveFile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')),
                         os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

    # load config
    self.srConfig.load()

    # set socket timeout
    socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

    # setup logger settings
    self.srLogger.logSize = self.srConfig.LOG_SIZE
    self.srLogger.logNr = self.srConfig.LOG_NR
    self.srLogger.logFile = self.srConfig.LOG_FILE
    self.srLogger.debugLogging = sickrage.DEBUG
    self.srLogger.consoleLogging = not sickrage.QUITE

    # start logger
    self.srLogger.start()

    # Check available space
    try:
        total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
        if available_space < 100:
            self.srLogger.error('Shutting down as SiCKRAGE needs some space to work. '
                                'You\'ll get corrupted data otherwise. Only %sMB left', available_space)
            sickrage.restart = False
            return
    except Exception:
        self.srLogger.error('Failed getting diskspace: %s', traceback.format_exc())

    # perform database startup actions
    for db in [self.mainDB, self.cacheDB, self.failedDB]:
        # initialize database
        db.initialize()

        # check integrity of database
        db.check_integrity()

        # migrate database
        db.migrate()

        # misc database cleanups
        db.cleanup()

    # compact main database
    if not sickrage.DEVELOPER and self.srConfig.LAST_DB_COMPACT < time.time() - 604800:  # 7 days
        self.mainDB.compact()
        self.srConfig.LAST_DB_COMPACT = int(time.time())

    # load data for shows from database
    self.load_shows()

    if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.srConfig.DEFAULT_PAGE = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(sickrage.CACHE_DIR, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection
    if self.srConfig.USE_ANIDB:
        try:
            self.ADBA_CONNECTION = adba.Connection(
                keepAlive=True,
                log=lambda msg: self.srLogger.debug("AniDB: %s " % msg)
            ).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
        except Exception as e:
            self.srLogger.warning("AniDB exception msg: %r " % repr(e))

    if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
        self.srConfig.WEB_PORT = 8081

    if not self.srConfig.WEB_COOKIE_SECRET:
        self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.srConfig.ANON_REDIRECT.endswith('?'):
        self.srConfig.ANON_REDIRECT = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
        self.srConfig.ROOT_DIRS = ''

    self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()

    if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.srConfig.NZB_METHOD = 'blackhole'

    if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                            'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.srConfig.TORRENT_METHOD = 'blackhole'

    if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
        self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

    if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
        self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

    if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
        self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

    if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
        self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = self.BACKLOGSEARCHER.get_backlog_cycle_time()
    if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
        self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

    if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
        self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

    if self.srConfig.SHOWUPDATE_HOUR > 23:
        self.srConfig.SHOWUPDATE_HOUR = 0
    elif self.srConfig.SHOWUPDATE_HOUR < 0:
        self.srConfig.SHOWUPDATE_HOUR = 0

    if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
        self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

    if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
        self.srConfig.SUBTITLES_LANGUAGES = []

    # add version checker job
    self.srScheduler.add_job(
        self.VERSIONUPDATER.run,
        srIntervalTrigger(**{'hours': self.srConfig.VERSION_UPDATER_FREQ,
                             'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
        name="VERSIONUPDATER", id="VERSIONUPDATER")

    # add network timezones updater job
    self.srScheduler.add_job(
        update_network_dict,
        srIntervalTrigger(**{'days': 1}),
        name="TZUPDATER", id="TZUPDATER")

    # add show updater job
    self.srScheduler.add_job(
        self.SHOWUPDATER.run,
        srIntervalTrigger(**{'days': 1,
                             'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
        name="SHOWUPDATER", id="SHOWUPDATER")

    # add show next episode job
    self.srScheduler.add_job(
        self.SHOWUPDATER.nextEpisode,
        srIntervalTrigger(**{'hours': 1}),
        name="SHOWNEXTEP", id="SHOWNEXTEP")

    # add daily search job
    self.srScheduler.add_job(
        self.DAILYSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ,
                             'start_date': datetime.datetime.now() + datetime.timedelta(minutes=4)}),
        name="DAILYSEARCHER", id="DAILYSEARCHER")

    # add backlog search job
    self.srScheduler.add_job(
        self.BACKLOGSEARCHER.run,
        srIntervalTrigger(**{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                             'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
                             'start_date': datetime.datetime.now() + datetime.timedelta(minutes=30)}),
        name="BACKLOG", id="BACKLOG")

    # add auto-postprocessing job
    self.srScheduler.add_job(
        self.AUTOPOSTPROCESSOR.run,
        srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                             'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
        name="POSTPROCESSOR", id="POSTPROCESSOR")

    # add find proper job
    self.srScheduler.add_job(
        self.PROPERSEARCHER.run,
        srIntervalTrigger(**{'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                         'daily': 24 * 60}[self.srConfig.PROPER_SEARCHER_INTERVAL]}),
        name="PROPERSEARCHER", id="PROPERSEARCHER")

    # add trakt.tv checker job
    self.srScheduler.add_job(
        self.TRAKTSEARCHER.run,
        srIntervalTrigger(**{'hours': 1}),
        name="TRAKTSEARCHER", id="TRAKTSEARCHER")

    # add subtitles finder job
    self.srScheduler.add_job(
        self.SUBTITLESEARCHER.run,
        srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
        name="SUBTITLESEARCHER", id="SUBTITLESEARCHER")

    # start scheduler service
    self.srScheduler.start()

    # Pause/Resume PROPERSEARCHER job
    (self.srScheduler.get_job('PROPERSEARCHER').pause,
     self.srScheduler.get_job('PROPERSEARCHER').resume)[self.srConfig.DOWNLOAD_PROPERS]()

    # Pause/Resume TRAKTSEARCHER job
    (self.srScheduler.get_job('TRAKTSEARCHER').pause,
     self.srScheduler.get_job('TRAKTSEARCHER').resume)[self.srConfig.USE_TRAKT]()

    # Pause/Resume SUBTITLESEARCHER job
    (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
     self.srScheduler.get_job('SUBTITLESEARCHER').resume)[self.srConfig.USE_SUBTITLES]()

    # Pause/Resume POSTPROCESS job
    (self.srScheduler.get_job('POSTPROCESSOR').pause,
     self.srScheduler.get_job('POSTPROCESSOR').resume)[self.srConfig.PROCESS_AUTOMATICALLY]()

    # start queues
    self.SEARCHQUEUE.start()
    self.SHOWQUEUE.start()

    # start webserver
    self.srWebServer.start()

    self.srLogger.info("SiCKRAGE :: STARTED")
    self.srLogger.info("SiCKRAGE :: VERSION:[{}]".format(self.VERSIONUPDATER.version))
    self.srLogger.info("SiCKRAGE :: CONFIG:[{}] [v{}]".format(sickrage.CONFIG_FILE,
                                                              self.srConfig.CONFIG_VERSION))
    self.srLogger.info("SiCKRAGE :: URL:[{}://{}:{}/]".format(('http', 'https')[self.srConfig.ENABLE_HTTPS],
                                                              self.srConfig.WEB_HOST, self.srConfig.WEB_PORT))

    # launch browser window
    if all([not sickrage.NOLAUNCH, sickrage.srCore.srConfig.LAUNCH_BROWSER]):
        threading.Thread(None,
                         lambda: launch_browser(('http', 'https')[sickrage.srCore.srConfig.ENABLE_HTTPS],
                                                self.srConfig.WEB_HOST,
                                                sickrage.srCore.srConfig.WEB_PORT)).start()

    # start ioloop event handler
    self.io_loop.start()
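# --- illustrative sketch (helper name is illustrative) -----------------------
# The URL log line above picks the scheme with the same bool-indexing trick:
# ('http', 'https')[enable_https]. As a small standalone helper:
def server_url(enable_https, web_host, web_port):
    return "{}://{}:{}/".format(('http', 'https')[enable_https], web_host, web_port)


assert server_url(False, 'localhost', 8081) == "http://localhost:8081/"
assert server_url(True, 'localhost', 8081) == "https://localhost:8081/"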
def start(self):
    self.started = True

    # thread name
    threading.currentThread().setName('CORE')

    # init core classes
    self.notifier_providers = NotifierProviders()
    self.metadata_providers = MetadataProviders()
    self.search_providers = SearchProviders()
    self.log = Logger()
    self.config = Config()
    self.api = API()
    self.alerts = Notifications()
    self.main_db = MainDB()
    self.cache_db = CacheDB()
    self.failed_db = FailedDB()
    self.scheduler = BackgroundScheduler()
    self.wserver = WebServer()
    self.wsession = WebSession()
    self.google_auth = GoogleAuth()
    self.name_cache = NameCache()
    self.show_queue = ShowQueue()
    self.search_queue = SearchQueue()
    self.postprocessor_queue = PostProcessorQueue()
    self.version_updater = VersionUpdater()
    self.show_updater = ShowUpdater()
    self.daily_searcher = DailySearcher()
    self.backlog_searcher = BacklogSearcher()
    self.proper_searcher = ProperSearcher()
    self.trakt_searcher = TraktSearcher()
    self.subtitle_searcher = SubtitleSearcher()
    self.auto_postprocessor = AutoPostProcessor()

    # Check if we need to perform a restore first
    if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
        success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
        print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
        if success:
            shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

    # migrate old database file names to new ones
    if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
        if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
            helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                             os.path.join(self.data_dir, '{}.bak-{}'.format(
                                 'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
        helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                         os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

    # load config
    self.config.load()

    # set language
    self.config.change_gui_lang(self.config.gui_lang)

    # set socket timeout
    socket.setdefaulttimeout(self.config.socket_timeout)

    # setup logger settings
    self.log.logSize = self.config.log_size
    self.log.logNr = self.config.log_nr
    self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
    self.log.debugLogging = self.config.debug
    self.log.consoleLogging = not self.quite

    # start logger
    self.log.start()

    # user agent
    if self.config.random_user_agent:
        self.user_agent = UserAgent().random

    urlparse.uses_netloc.append('scgi')
    urllib.FancyURLopener.version = self.user_agent

    # Check available space
    try:
        total_space, available_space = getFreeSpace(self.data_dir)
        if available_space < 100:
            self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                           'otherwise. Only %sMB left', available_space)
            return
    except Exception:
        self.log.error('Failed getting diskspace: %s', traceback.format_exc())

    # perform database startup actions
    for db in [self.main_db, self.cache_db, self.failed_db]:
        # initialize database
        db.initialize()

        # check integrity of database
        db.check_integrity()

        # migrate database
        db.migrate()

        # misc database cleanups
        db.cleanup()

    # compact main database
    if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
        self.main_db.compact()
        self.config.last_db_compact = int(time.time())

    # load name cache
    self.name_cache.load()

    # load data for shows from database
    self.load_shows()

    if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
        self.config.default_page = 'home'

    # cleanup cache folder
    for folder in ['mako', 'sessions', 'indexers']:
        try:
            shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
        except Exception:
            continue

    # init anidb connection
    if self.config.use_anidb:
        def anidb_logger(msg):
            return self.log.debug("AniDB: {} ".format(msg))

        try:
            self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
            self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
        except Exception as e:
            self.log.warning("AniDB exception msg: %r " % repr(e))

    if self.config.web_port < 21 or self.config.web_port > 65535:
        self.config.web_port = 8081

    if not self.config.web_cookie_secret:
        self.config.web_cookie_secret = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not self.config.anon_redirect.endswith('?'):
        self.config.anon_redirect = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
        self.config.root_dirs = ''

    self.config.naming_force_folders = check_force_season_folders()

    if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
        self.config.nzb_method = 'blackhole'

    if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                          'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
        self.config.torrent_method = 'blackhole'

    if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
        self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

    if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
        self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

    self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
    if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
        self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

    if self.config.version_updater_freq < self.config.min_version_updater_freq:
        self.config.version_updater_freq = self.config.min_version_updater_freq

    if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
        self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

    if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
        self.config.proper_searcher_interval = 'daily'

    if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
        self.config.showupdate_hour = 0

    if self.config.subtitles_languages[0] == '':
        self.config.subtitles_languages = []

    # add version checker job
    self.scheduler.add_job(
        self.version_updater.run,
        IntervalTrigger(hours=self.config.version_updater_freq),
        name="VERSIONUPDATER", id="VERSIONUPDATER")

    # add network timezones updater job
    self.scheduler.add_job(
        update_network_dict,
        IntervalTrigger(days=1),
        name="TZUPDATER", id="TZUPDATER")

    # add show updater job
    self.scheduler.add_job(
        self.show_updater.run,
        IntervalTrigger(days=1, start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)),
        name="SHOWUPDATER", id="SHOWUPDATER")

    # add daily search job
    self.scheduler.add_job(
        self.daily_searcher.run,
        IntervalTrigger(minutes=self.config.daily_searcher_freq,
                        start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
        name="DAILYSEARCHER", id="DAILYSEARCHER")

    # add backlog search job
    self.scheduler.add_job(
        self.backlog_searcher.run,
        IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                        start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)),
        name="BACKLOG", id="BACKLOG")

    # add auto-postprocessing job
    self.scheduler.add_job(
        self.auto_postprocessor.run,
        IntervalTrigger(minutes=self.config.autopostprocessor_freq),
        name="POSTPROCESSOR", id="POSTPROCESSOR")

    # add find proper job
    self.scheduler.add_job(
        self.proper_searcher.run,
        IntervalTrigger(minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                 'daily': 24 * 60}[self.config.proper_searcher_interval]),
        name="PROPERSEARCHER", id="PROPERSEARCHER")

    # add trakt.tv checker job
    self.scheduler.add_job(
        self.trakt_searcher.run,
        IntervalTrigger(hours=1),
        name="TRAKTSEARCHER", id="TRAKTSEARCHER")

    # add subtitles finder job
    self.scheduler.add_job(
        self.subtitle_searcher.run,
        IntervalTrigger(hours=self.config.subtitle_searcher_freq),
        name="SUBTITLESEARCHER", id="SUBTITLESEARCHER")

    # start scheduler service
    self.scheduler.start()

    # Pause/Resume PROPERSEARCHER job
    (self.scheduler.get_job('PROPERSEARCHER').pause,
     self.scheduler.get_job('PROPERSEARCHER').resume)[self.config.download_propers]()

    # Pause/Resume TRAKTSEARCHER job
    (self.scheduler.get_job('TRAKTSEARCHER').pause,
     self.scheduler.get_job('TRAKTSEARCHER').resume)[self.config.use_trakt]()

    # Pause/Resume SUBTITLESEARCHER job
    (self.scheduler.get_job('SUBTITLESEARCHER').pause,
     self.scheduler.get_job('SUBTITLESEARCHER').resume)[self.config.use_subtitles]()

    # Pause/Resume POSTPROCESS job
    (self.scheduler.get_job('POSTPROCESSOR').pause,
     self.scheduler.get_job('POSTPROCESSOR').resume)[self.config.process_automatically]()

    # start queues
    self.search_queue.start()
    self.show_queue.start()
    self.postprocessor_queue.start()

    # start webserver
    self.wserver.start()
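# --- illustrative sketch (stdlib stand-in, not the project's helper) ---------
# getFreeSpace() above is a project helper; the same "refuse to start below
# 100MB free" check can be expressed with shutil.disk_usage (Python 3.3+):
import shutil


def enough_space(path, min_mb=100):
    usage = shutil.disk_usage(path)       # named tuple: (total, used, free) in bytes
    available_mb = usage.free // (1024 * 1024)
    return available_mb >= min_mb, available_mb


_ok, _available = enough_space('.')
if not _ok:
    print('Shutting down, only %sMB left' % _available)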
def initialize(): if not sickrage.INITIALIZED: with threading.Lock(): # init encoding encodingInit() # Check if we need to perform a restore first os.chdir(sickrage.DATA_DIR) restore_dir = os.path.join(sickrage.DATA_DIR, 'restore') if os.path.exists(restore_dir): success = restoreDB(restore_dir, sickrage.DATA_DIR) sickrage.LOGGER.info( "Restore: restoring DB and config.ini %s!\n" % ("FAILED", "SUCCESSFUL")[success]) # init indexerApi sickrage.INDEXER_API = indexerApi # initialize notifiers sickrage.NOTIFIERS = AttrDict( libnotify=LibnotifyNotifier(), kodi_notifier=KODINotifier(), plex_notifier=PLEXNotifier(), emby_notifier=EMBYNotifier(), nmj_notifier=NMJNotifier(), nmjv2_notifier=NMJv2Notifier(), synoindex_notifier=synoIndexNotifier(), synology_notifier=synologyNotifier(), pytivo_notifier=pyTivoNotifier(), growl_notifier=GrowlNotifier(), prowl_notifier=ProwlNotifier(), libnotify_notifier=LibnotifyNotifier(), pushover_notifier=PushoverNotifier(), boxcar_notifier=BoxcarNotifier(), boxcar2_notifier=Boxcar2Notifier(), nma_notifier=NMA_Notifier(), pushalot_notifier=PushalotNotifier(), pushbullet_notifier=PushbulletNotifier(), freemobile_notifier=FreeMobileNotifier(), twitter_notifier=TwitterNotifier(), trakt_notifier=TraktNotifier(), email_notifier=EmailNotifier(), ) sickrage.NAMING_EP_TYPE = ( "%(seasonnumber)dx%(episodenumber)02d", "s%(seasonnumber)02de%(episodenumber)02d", "S%(seasonnumber)02dE%(episodenumber)02d", "%(seasonnumber)02dx%(episodenumber)02d") sickrage.SPORTS_EP_TYPE = ( "%(seasonnumber)dx%(episodenumber)02d", "s%(seasonnumber)02de%(episodenumber)02d", "S%(seasonnumber)02dE%(episodenumber)02d", "%(seasonnumber)02dx%(episodenumber)02d") sickrage.NAMING_EP_TYPE_TEXT = ("1x02", "s01e02", "S01E02", "01x02") sickrage.NAMING_MULTI_EP_TYPE = { 0: ["-%(episodenumber)02d"] * len(sickrage.NAMING_EP_TYPE), 1: [" - " + x for x in sickrage.NAMING_EP_TYPE], 2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")] } sickrage.NAMING_MULTI_EP_TYPE_TEXT = ("extend", "duplicate", "repeat") sickrage.NAMING_SEP_TYPE = (" - ", " ") sickrage.NAMING_SEP_TYPE_TEXT = (" - ", "space") # migrate old database filenames to new ones if not os.path.exists(main_db.MainDB().filename ) and os.path.exists("sickbeard.db"): helpers.moveFile("sickbeard.db", main_db.MainDB().filename) # init config file srConfig.load_config(sickrage.CONFIG_FILE, True) # set socket timeout socket.setdefaulttimeout(sickrage.SOCKET_TIMEOUT) # init logger sickrage.LOGGER = sickrage.LOGGER.__class__( logFile=sickrage.LOG_FILE, logSize=sickrage.LOG_SIZE, logNr=sickrage.LOG_NR, fileLogging=makeDir(sickrage.LOG_DIR), debugLogging=sickrage.DEBUG) # init updater and get current version sickrage.VERSIONUPDATER = VersionUpdater() sickrage.VERSION = sickrage.VERSIONUPDATER.updater.get_cur_version # initialize the main SB database main_db.MainDB().InitialSchema().upgrade() # initialize the cache database cache_db.CacheDB().InitialSchema().upgrade() # initialize the failed downloads database failed_db.FailedDB().InitialSchema().upgrade() # fix up any db problems main_db.MainDB().SanityCheck() if sickrage.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'): sickrage.DEFAULT_PAGE = 'home' if not makeDir(sickrage.CACHE_DIR): sickrage.LOGGER.error("!!! 
            # Check if we need to perform a restore of the cache folder
            try:
                restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
                if os.path.exists(restore_dir) and os.path.exists(os.path.join(restore_dir, 'cache')):
                    def restore_cache(srcdir, dstdir):
                        def path_leaf(path):
                            head, tail = os.path.split(path)
                            return tail or os.path.basename(head)

                        try:
                            if os.path.isdir(dstdir):
                                # back up the existing cache dir before restoring over it
                                bakfilename = '{}-{}'.format(
                                    path_leaf(dstdir),
                                    datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
                                shutil.move(dstdir, os.path.join(os.path.dirname(dstdir), bakfilename))

                            shutil.move(srcdir, dstdir)
                            sickrage.LOGGER.info("Restore: restoring cache successful")
                        except Exception as E:
                            sickrage.LOGGER.error("Restore: restoring cache failed: {}".format(E))

                    restore_cache(os.path.join(restore_dir, 'cache'), sickrage.CACHE_DIR)
            except Exception as e:
                sickrage.LOGGER.error("Restore: restoring cache failed: {}".format(e))
            finally:
                if os.path.exists(os.path.join(sickrage.DATA_DIR, 'restore')):
                    try:
                        removetree(os.path.join(sickrage.DATA_DIR, 'restore'))
                    except Exception as e:
                        sickrage.LOGGER.error("Restore: Unable to remove the restore directory: {}".format(e))

                # cleanup cache folder
                for cleanupDir in ['mako', 'sessions', 'indexers']:
                    try:
                        removetree(os.path.join(sickrage.CACHE_DIR, cleanupDir))
                    except Exception as e:
                        sickrage.LOGGER.warning(
                            "Restore: Unable to remove the cache/{} directory: {}".format(cleanupDir, e))

            if sickrage.WEB_PORT < 21 or sickrage.WEB_PORT > 65535:
                sickrage.WEB_PORT = 8081

            if not sickrage.WEB_COOKIE_SECRET:
                sickrage.WEB_COOKIE_SECRET = generateCookieSecret()

            # attempt to help prevent users from breaking links by using a bad url
            if not sickrage.ANON_REDIRECT.endswith('?'):
                sickrage.ANON_REDIRECT = ''

            if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', sickrage.ROOT_DIRS):
                sickrage.ROOT_DIRS = ''

            sickrage.NAMING_FORCE_FOLDERS = check_force_season_folders()

            if sickrage.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
                sickrage.NZB_METHOD = 'blackhole'

            if not sickrage.PROVIDER_ORDER:
                sickrage.PROVIDER_ORDER = list(sickrage.providersDict[GenericProvider.NZB].keys()) + \
                                          list(sickrage.providersDict[GenericProvider.TORRENT].keys())

            if sickrage.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                               'download_station', 'rtorrent', 'qbittorrent', 'mlnet'):
                sickrage.TORRENT_METHOD = 'blackhole'

            if sickrage.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
                sickrage.PROPER_SEARCHER_INTERVAL = 'daily'

            if sickrage.AUTOPOSTPROCESSOR_FREQ < sickrage.MIN_AUTOPOSTPROCESSOR_FREQ:
                sickrage.AUTOPOSTPROCESSOR_FREQ = sickrage.MIN_AUTOPOSTPROCESSOR_FREQ

            if sickrage.NAMECACHE_FREQ < sickrage.MIN_NAMECACHE_FREQ:
                sickrage.NAMECACHE_FREQ = sickrage.MIN_NAMECACHE_FREQ

            if sickrage.DAILY_SEARCHER_FREQ < sickrage.MIN_DAILY_SEARCHER_FREQ:
                sickrage.DAILY_SEARCHER_FREQ = sickrage.MIN_DAILY_SEARCHER_FREQ

            sickrage.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
            if sickrage.BACKLOG_SEARCHER_FREQ < sickrage.MIN_BACKLOG_SEARCHER_FREQ:
                sickrage.BACKLOG_SEARCHER_FREQ = sickrage.MIN_BACKLOG_SEARCHER_FREQ

            if sickrage.VERSION_UPDATER_FREQ < sickrage.MIN_VERSION_UPDATER_FREQ:
                sickrage.VERSION_UPDATER_FREQ = sickrage.MIN_VERSION_UPDATER_FREQ

            if sickrage.SHOWUPDATE_HOUR > 23 or sickrage.SHOWUPDATE_HOUR < 0:
                sickrage.SHOWUPDATE_HOUR = 0

            if sickrage.SUBTITLE_SEARCHER_FREQ < sickrage.MIN_SUBTITLE_SEARCHER_FREQ:
                sickrage.SUBTITLE_SEARCHER_FREQ = sickrage.MIN_SUBTITLE_SEARCHER_FREQ

            sickrage.NEWS_LATEST = sickrage.NEWS_LAST_READ
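            # NOTE: each frequency clamp above enforces a configured floor; any
            # one of them is equivalent to a max() call, e.g. (illustrative
            # rewrite only, not a behavior change):
            #
            #     sickrage.DAILY_SEARCHER_FREQ = max(sickrage.DAILY_SEARCHER_FREQ,
            #                                        sickrage.MIN_DAILY_SEARCHER_FREQ)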
            if sickrage.SUBTITLES_LANGUAGES[0] == '':
                sickrage.SUBTITLES_LANGUAGES = []

            sickrage.TIME_PRESET = sickrage.TIME_PRESET_W_SECONDS.replace(":%S", "")

            # initialize metadata_providers
            sickrage.metadataProvideDict = get_metadata_generator_dict()
            for cur_metadata_tuple in [(sickrage.METADATA_KODI, kodi),
                                       (sickrage.METADATA_KODI_12PLUS, kodi_12plus),
                                       (sickrage.METADATA_MEDIABROWSER, mediabrowser),
                                       (sickrage.METADATA_PS3, ps3),
                                       (sickrage.METADATA_WDTV, wdtv),
                                       (sickrage.METADATA_TIVO, tivo),
                                       (sickrage.METADATA_MEDE8ER, mede8er)]:
                (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
                tmp_provider = cur_metadata_class.metadata_class()
                tmp_provider.set_config(cur_metadata_config)
                sickrage.metadataProvideDict[tmp_provider.name] = tmp_provider

            # init caches
            sickrage.NAMECACHE = nameCache()

            # init queues
            sickrage.SHOWUPDATER = ShowUpdater()
            sickrage.SHOWQUEUE = ShowQueue()
            sickrage.SEARCHQUEUE = SearchQueue()

            # load data for shows from database
            sickrage.showList = load_shows()

            # init searchers
            sickrage.DAILYSEARCHER = DailySearcher()
            sickrage.BACKLOGSEARCHER = BacklogSearcher()
            sickrage.PROPERSEARCHER = ProperSearcher()
            sickrage.TRAKTSEARCHER = TraktSearcher()
            sickrage.SUBTITLESEARCHER = SubtitleSearcher()

            # init scheduler
            sickrage.Scheduler = Scheduler()

            # add version checker job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.VERSIONUPDATER.run,
                SRIntervalTrigger(**{
                    'hours': sickrage.VERSION_UPDATER_FREQ,
                    'min': sickrage.MIN_VERSION_UPDATER_FREQ}),
                name="VERSIONUPDATER",
                id="VERSIONUPDATER",
                replace_existing=True)

            # add network timezones updater job to scheduler
            sickrage.Scheduler.add_job(
                update_network_dict,
                SRIntervalTrigger(**{'days': 1}),
                name="TZUPDATER",
                id="TZUPDATER",
                replace_existing=True)

            # add namecache updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.NAMECACHE.run,
                SRIntervalTrigger(**{
                    'minutes': sickrage.NAMECACHE_FREQ,
                    'min': sickrage.MIN_NAMECACHE_FREQ}),
                name="NAMECACHE",
                id="NAMECACHE",
                replace_existing=True)

            # add show queue job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SHOWQUEUE.run,
                SRIntervalTrigger(**{'seconds': 3}),
                name="SHOWQUEUE",
                id="SHOWQUEUE",
                replace_existing=True)

            # add search queue job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SEARCHQUEUE.run,
                SRIntervalTrigger(**{'seconds': 1}),
                name="SEARCHQUEUE",
                id="SEARCHQUEUE",
                replace_existing=True)

            # add show updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SHOWUPDATER.run,
                SRIntervalTrigger(**{
                    'hours': 1,
                    'start_date': datetime.datetime.now().replace(hour=sickrage.SHOWUPDATE_HOUR)}),
                name="SHOWUPDATER",
                id="SHOWUPDATER",
                replace_existing=True)

            # add daily search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.DAILYSEARCHER.run,
                SRIntervalTrigger(**{
                    'minutes': sickrage.DAILY_SEARCHER_FREQ,
                    'min': sickrage.MIN_DAILY_SEARCHER_FREQ}),
                name="DAILYSEARCHER",
                id="DAILYSEARCHER",
                replace_existing=True)

            # add backlog search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.BACKLOGSEARCHER.run,
                SRIntervalTrigger(**{
                    'minutes': sickrage.BACKLOG_SEARCHER_FREQ,
                    'min': sickrage.MIN_BACKLOG_SEARCHER_FREQ}),
                name="BACKLOG",
                id="BACKLOG",
                replace_existing=True)

            # add auto-postprocessing job to scheduler
            job = sickrage.Scheduler.add_job(
                auto_postprocessor.PostProcessor().run,
                SRIntervalTrigger(**{
                    'minutes': sickrage.AUTOPOSTPROCESSOR_FREQ,
                    'min': sickrage.MIN_AUTOPOSTPROCESSOR_FREQ}),
                name="POSTPROCESSOR",
                id="POSTPROCESSOR",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.PROCESS_AUTOMATICALLY]()
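            # NOTE: assuming the Scheduler wrapper delegates to APScheduler,
            # replace_existing=True lets add_job() overwrite a stored job that
            # already uses the same id instead of raising a conflict error, so
            # re-running initialize() stays safe; roughly equivalent to the
            # explicit form (sketch, commented out):
            #
            #     if sickrage.Scheduler.get_job("POSTPROCESSOR"):
            #         sickrage.Scheduler.remove_job("POSTPROCESSOR")
            #     sickrage.Scheduler.add_job(...)
            #
            # The 'min' key passed to SRIntervalTrigger appears to act as a
            # floor on the interval (an assumption based on how the MIN_*
            # settings are used throughout this file).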
            # add find propers job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.PROPERSEARCHER.run,
                SRIntervalTrigger(**{
                    'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60,
                                'daily': 24 * 60}[sickrage.PROPER_SEARCHER_INTERVAL]}),
                name="PROPERSEARCHER",
                id="PROPERSEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.DOWNLOAD_PROPERS]()

            # add trakt.tv checker job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.TRAKTSEARCHER.run,
                SRIntervalTrigger(**{'hours': 1}),
                name="TRAKTSEARCHER",
                id="TRAKTSEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.USE_TRAKT]()

            # add subtitles finder job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.SUBTITLESEARCHER.run,
                SRIntervalTrigger(**{'hours': sickrage.SUBTITLE_SEARCHER_FREQ}),
                name="SUBTITLESEARCHER",
                id="SUBTITLESEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.USE_SUBTITLES]()

            # initialize web server
            sickrage.WEB_SERVER = SRWebServer(**{
                'port': int(sickrage.WEB_PORT),
                'host': sickrage.WEB_HOST,
                'data_root': sickrage.DATA_DIR,
                'gui_root': sickrage.GUI_DIR,
                'web_root': sickrage.WEB_ROOT,
                'log_dir': sickrage.WEB_LOG or sickrage.LOG_DIR,
                'username': sickrage.WEB_USERNAME,
                'password': sickrage.WEB_PASSWORD,
                'enable_https': sickrage.ENABLE_HTTPS,
                'handle_reverse_proxy': sickrage.HANDLE_REVERSE_PROXY,
                'https_cert': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_CERT),
                'https_key': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_KEY),
                'daemonize': sickrage.DAEMONIZE,
                'pidfile': sickrage.PIDFILE,
                'stop_timeout': 3,
                'nolaunch': sickrage.WEB_NOLAUNCH
            })

            sickrage.LOGGER.info("SiCKRAGE VERSION:[{}] CONFIG:[{}]".format(
                sickrage.VERSION, sickrage.CONFIG_FILE))

            sickrage.INITIALIZED = True

    return True
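# A minimal usage sketch for initialize() (hypothetical entry point; the real
# launcher also handles CLI arguments, daemonizing, and shutdown, which are
# omitted here, so this stays commented out):
#
#     import sickrage
#
#     if initialize():
#         sickrage.Scheduler.start()   # assumes the caller starts the scheduler
#         sickrage.WEB_SERVER.start()  # SRWebServer.start() assumed, mirroring wserver.start() above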