def logSnatch(searchResult):
    """
    Logs a successful snatch

    :param searchResult: Search result that was successful
    """
    logDate = datetime.today().strftime(History.date_format)
    release = FailedHistory.prepareFailedName(searchResult.name)

    providerClass = searchResult.provider
    if providerClass is not None:
        provider = providerClass.name
    else:
        provider = "unknown"

    show_obj = searchResult.episodes[0].show

    for episode in searchResult.episodes:
        failed_db.FailedDB().action(
            "INSERT INTO history (date, size, release, provider, showid, season, episode, old_status) "
            "VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
            [logDate, searchResult.size, release, provider, show_obj.indexerid,
             episode.season, episode.episode, episode.status])
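# A minimal call sketch for the snatch-logging flow above (hypothetical names;
# assumes a search result object exposing .name, .size, .provider and a
# populated .episodes list, as logSnatch expects):
#
#   FailedHistory.logSnatch(search_result)  # writes one history row per episode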
def setUp_test_db(force=False):
    """Upgrades the DB to the latest version"""
    global TESTDB_INITALIZED

    if not TESTDB_INITALIZED or force:
        # remove old db files
        tearDown_test_db()

        # upgrade main
        main_db.MainDB().InitialSchema().upgrade()

        # sanity check main
        main_db.MainDB().SanityCheck()

        # upgrade cache
        cache_db.CacheDB().InitialSchema().upgrade()

        # upgrade failed
        failed_db.FailedDB().InitialSchema().upgrade()

        # populate scene exceptions table
        # retrieve_exceptions(False, False)

        TESTDB_INITALIZED = True
def revertFailedEpisode(epObj):
    """Restore the episodes of a failed download to their original state"""
    sql_results = failed_db.FailedDB().select(
        "SELECT * FROM history WHERE showid=? AND season=?",
        [epObj.show.indexerid, epObj.season])

    history_eps = {res[b"episode"]: res for res in sql_results}

    try:
        sickrage.srLogger.info("Reverting episode (%s, %s): %s" % (epObj.season, epObj.episode, epObj.name))
        with epObj.lock:
            if epObj.episode in history_eps:
                sickrage.srLogger.info("Found in history")
                epObj.status = history_eps[epObj.episode][b'old_status']
            else:
                sickrage.srLogger.warning("Episode not found in history. Setting it back to WANTED")
                epObj.status = WANTED

            epObj.saveToDB()
    except EpisodeNotFoundException as e:
        sickrage.srLogger.warning(
            "Unable to create episode, please set its status manually: {}".format(e.message))
def logFailed(release):
    """Log a failed download to the failed table, resolving size and provider from the snatch history"""
    log_str = ""
    size = -1
    provider = ""

    release = FailedHistory.prepareFailedName(release)

    sql_results = failed_db.FailedDB().select(
        "SELECT * FROM history WHERE release=?", [release])

    if len(sql_results) == 0:
        sickrage.srLogger.warning("Release not found in snatch history.")
    elif len(sql_results) > 1:
        sickrage.srLogger.warning("Multiple logged snatches found for release")

        sizes = len(set(x[b"size"] for x in sql_results))
        providers = len(set(x[b"provider"] for x in sql_results))

        if sizes == 1:
            sickrage.srLogger.warning("However, they're all the same size. Continuing with found size.")
            size = sql_results[0][b"size"]
        else:
            sickrage.srLogger.warning(
                "They also vary in size. Deleting the logged snatches and recording this release with no size/provider")
            for result in sql_results:
                FailedHistory.deleteLoggedSnatch(result[b"release"], result[b"size"], result[b"provider"])

        if providers == 1:
            sickrage.srLogger.info("They're also from the same provider. Using it as well.")
            provider = sql_results[0][b"provider"]
    else:
        size = sql_results[0][b"size"]
        provider = sql_results[0][b"provider"]

    if not FailedHistory.hasFailed(release, size, provider):
        failed_db.FailedDB().action(
            "INSERT INTO failed (release, size, provider) VALUES (?, ?, ?)",
            [release, size, provider])

    FailedHistory.deleteLoggedSnatch(release, size, provider)

    return log_str
def findFailedRelease(epObj):
    """
    Find releases in history by show ID, season and episode.
    Returns the most recently snatched (release, provider) pair,
    or (None, None) if nothing is found in the snatch history.
    """
    release = None
    provider = None

    # Clear old snatches for this release if any exist
    failed_db.FailedDB().action(
        "DELETE FROM history WHERE showid=? AND season=? AND episode=? "
        "AND date < (SELECT max(date) FROM history WHERE showid=? AND season=? AND episode=?)",
        [epObj.show.indexerid, epObj.season, epObj.episode,
         epObj.show.indexerid, epObj.season, epObj.episode])

    # Search for release in snatch history
    results = failed_db.FailedDB().select(
        "SELECT release, provider, date FROM history WHERE showid=? AND season=? AND episode=?",
        [epObj.show.indexerid, epObj.season, epObj.episode])

    for result in results:
        release = str(result[b"release"])
        provider = str(result[b"provider"])
        date = result[b"date"]

        # Clear any incomplete snatch records for this release if any exist
        failed_db.FailedDB().action(
            "DELETE FROM history WHERE release=? AND date!=?", [release, date])

        # Found a previously failed release
        sickrage.srLogger.debug(
            "Failed release found for season (%s): (%s)" % (epObj.season, result[b"release"]))

        return (release, provider)

    # Release was not found
    sickrage.srLogger.debug(
        "No releases found for season (%s) of (%s)" % (epObj.season, epObj.show.indexerid))

    return (release, provider)
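# Usage sketch (hypothetical flow; assumes ep_obj is an episode object with
# .show.indexerid, .season and .episode, as findFailedRelease expects):
#
#   release, provider = FailedHistory.findFailedRelease(ep_obj)
#   if release is not None:
#       FailedHistory.logFailed(release)  # record the failure before retrying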
def deleteLoggedSnatch(release, size, provider):
    """
    Remove a snatch from history

    :param release: release to delete
    :param size: Size of release
    :param provider: Provider to delete it from
    """
    release = FailedHistory.prepareFailedName(release)

    failed_db.FailedDB().action(
        "DELETE FROM history WHERE release=? AND size=? AND provider=?",
        [release, size, provider])
def hasFailed(release, size, provider="%"):
    """
    Returns True if a release has previously failed.

    If provider is given, return True only if the release is found with that
    specific provider. Otherwise, return True if the release is found with
    any provider.

    :param release: Release name to record failure
    :param size: Size of release
    :param provider: Specific provider to search (defaults to all providers)
    :return: True if a release has previously failed.
    """
    release = FailedHistory.prepareFailedName(release)

    sql_results = failed_db.FailedDB().select(
        "SELECT * FROM failed WHERE release=? AND size=? AND provider LIKE ?",
        [release, size, provider])

    return len(sql_results) > 0
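# Provider matching sketch: hasFailed compares providers with LIKE, so the
# default provider="%" matches a failure logged by any provider, while an
# explicit name narrows the check (release name, size and provider below are
# hypothetical):
#
#   FailedHistory.hasFailed("Show.Name.S01E01.720p-GRP", 1073741824)            # any provider
#   FailedHistory.hasFailed("Show.Name.S01E01.720p-GRP", 1073741824, "NZBgeek") # one provider only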
def start(self):
    self.PID = os.getpid()

    # set socket timeout
    socket.setdefaulttimeout(sickrage.srConfig.SOCKET_TIMEOUT)

    # init version updater
    self.VERSIONUPDATER = srVersionUpdater()

    # init updater and get current version
    self.VERSION = self.VERSIONUPDATER.updater.version

    # init services
    self.SCHEDULER = srScheduler()
    self.WEBSERVER = srWebServer()
    self.INDEXER_API = srIndexerApi

    # init caches
    self.NAMECACHE = srNameCache()

    # init queues
    self.SHOWUPDATER = srShowUpdater()
    self.SHOWQUEUE = srShowQueue()
    self.SEARCHQUEUE = srSearchQueue()

    # init searchers
    self.DAILYSEARCHER = srDailySearcher()
    self.BACKLOGSEARCHER = srBacklogSearcher()
    self.PROPERSEARCHER = srProperSearcher()
    self.TRAKTSEARCHER = srTraktSearcher()
    self.SUBTITLESEARCHER = srSubtitleSearcher()

    # init postprocessor
    self.AUTOPOSTPROCESSOR = srPostProcessor()

    # migrate old database file names to new ones
    if not os.path.exists(main_db.MainDB().filename) and os.path.exists("sickbeard.db"):
        helpers.moveFile("sickbeard.db", main_db.MainDB().filename)

    # initialize the main SB database
    main_db.MainDB().InitialSchema().upgrade()

    # initialize the cache database
    cache_db.CacheDB().InitialSchema().upgrade()

    # initialize the failed downloads database
    failed_db.FailedDB().InitialSchema().upgrade()

    # fix up any db problems
    main_db.MainDB().SanityCheck()

    # load data for shows from database
    self.load_shows()

    if sickrage.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
        sickrage.srConfig.DEFAULT_PAGE = 'home'

    if not makeDir(sickrage.srConfig.CACHE_DIR):
        sickrage.srLogger.error("!!! Creating local cache dir failed")
        sickrage.srConfig.CACHE_DIR = get_temp_dir()

    # Check if we need to perform a restore of the cache folder
    try:
        restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
        if os.path.exists(restore_dir) and os.path.exists(os.path.join(restore_dir, 'cache')):
            def restore_cache(src_dir, dst_dir):
                def path_leaf(path):
                    head, tail = os.path.split(path)
                    return tail or os.path.basename(head)

                try:
                    # back up the existing cache dir before moving the restore copy into place
                    if os.path.isdir(dst_dir):
                        bak_filename = '{}-{}'.format(path_leaf(dst_dir), datetime.now().strftime('%Y%m%d_%H%M%S'))
                        shutil.move(dst_dir, os.path.join(os.path.dirname(dst_dir), bak_filename))

                    shutil.move(src_dir, dst_dir)
                    sickrage.srLogger.info("Restore: restoring cache successful")
                except Exception as E:
                    sickrage.srLogger.error("Restore: restoring cache failed: {}".format(E.message))

            restore_cache(os.path.join(restore_dir, 'cache'), sickrage.srConfig.CACHE_DIR)
    except Exception as e:
        sickrage.srLogger.error("Restore: restoring cache failed: {}".format(e.message))
    finally:
        if os.path.exists(os.path.join(sickrage.DATA_DIR, 'restore')):
            try:
                removetree(os.path.join(sickrage.DATA_DIR, 'restore'))
            except Exception as e:
                sickrage.srLogger.error("Restore: Unable to remove the restore directory: {}".format(e.message))

        for cleanupDir in ['mako', 'sessions', 'indexers']:
            try:
                removetree(os.path.join(sickrage.srConfig.CACHE_DIR, cleanupDir))
            except Exception as e:
                sickrage.srLogger.warning(
                    "Restore: Unable to remove the cache/{} directory: {}".format(cleanupDir, e))

    if sickrage.srConfig.WEB_PORT < 21 or sickrage.srConfig.WEB_PORT > 65535:
        sickrage.srConfig.WEB_PORT = 8081

    if not sickrage.srConfig.WEB_COOKIE_SECRET:
        sickrage.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

    # attempt to help prevent users from breaking links by using a bad url
    if not sickrage.srConfig.ANON_REDIRECT.endswith('?'):
        sickrage.srConfig.ANON_REDIRECT = ''

    if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', sickrage.srConfig.ROOT_DIRS):
        sickrage.srConfig.ROOT_DIRS = ''

    sickrage.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()

    if sickrage.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
        sickrage.srConfig.NZB_METHOD = 'blackhole'

    if not sickrage.srConfig.PROVIDER_ORDER:
        sickrage.srConfig.PROVIDER_ORDER = self.providersDict[GenericProvider.NZB].keys() + \
                                           self.providersDict[GenericProvider.TORRENT].keys()

    if sickrage.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                                'download_station', 'rtorrent', 'qbittorrent', 'mlnet'):
        sickrage.srConfig.TORRENT_METHOD = 'blackhole'

    if sickrage.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
        sickrage.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

    # clamp scheduler frequencies to their configured minimums
    if sickrage.srConfig.AUTOPOSTPROCESSOR_FREQ < sickrage.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
        sickrage.srConfig.AUTOPOSTPROCESSOR_FREQ = sickrage.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

    if sickrage.srConfig.NAMECACHE_FREQ < sickrage.srConfig.MIN_NAMECACHE_FREQ:
        sickrage.srConfig.NAMECACHE_FREQ = sickrage.srConfig.MIN_NAMECACHE_FREQ

    if sickrage.srConfig.DAILY_SEARCHER_FREQ < sickrage.srConfig.MIN_DAILY_SEARCHER_FREQ:
        sickrage.srConfig.DAILY_SEARCHER_FREQ = sickrage.srConfig.MIN_DAILY_SEARCHER_FREQ

    sickrage.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
    if sickrage.srConfig.BACKLOG_SEARCHER_FREQ < sickrage.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
        sickrage.srConfig.BACKLOG_SEARCHER_FREQ = sickrage.srConfig.MIN_BACKLOG_SEARCHER_FREQ

    if sickrage.srConfig.VERSION_UPDATER_FREQ < sickrage.srConfig.MIN_VERSION_UPDATER_FREQ:
        sickrage.srConfig.VERSION_UPDATER_FREQ = sickrage.srConfig.MIN_VERSION_UPDATER_FREQ

    if sickrage.srConfig.SHOWUPDATE_HOUR > 23 or sickrage.srConfig.SHOWUPDATE_HOUR < 0:
        sickrage.srConfig.SHOWUPDATE_HOUR = 0

    if sickrage.srConfig.SUBTITLE_SEARCHER_FREQ < sickrage.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
        sickrage.srConfig.SUBTITLE_SEARCHER_FREQ = sickrage.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

    sickrage.srConfig.NEWS_LATEST = sickrage.srConfig.NEWS_LAST_READ

    if sickrage.srConfig.SUBTITLES_LANGUAGES[0] == '':
        sickrage.srConfig.SUBTITLES_LANGUAGES = []

    sickrage.srConfig.TIME_PRESET = sickrage.srConfig.TIME_PRESET_W_SECONDS.replace(":%S", "")

    # initialize metadata_providers
    self.metadataProviderDict = get_metadata_generator_dict()
    for cur_metadata_tuple in [(sickrage.srConfig.METADATA_KODI, kodi),
                               (sickrage.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
                               (sickrage.srConfig.METADATA_MEDIABROWSER, mediabrowser),
                               (sickrage.srConfig.METADATA_PS3, ps3),
                               (sickrage.srConfig.METADATA_WDTV, wdtv),
                               (sickrage.srConfig.METADATA_TIVO, tivo),
                               (sickrage.srConfig.METADATA_MEDE8ER, mede8er)]:
        (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
        tmp_provider = cur_metadata_class.metadata_class()
        tmp_provider.set_config(cur_metadata_config)
        self.metadataProviderDict[tmp_provider.name] = tmp_provider

    # add version checker job to scheduler
    self.SCHEDULER.add_job(
        self.VERSIONUPDATER.run,
        srIntervalTrigger(**{'hours': sickrage.srConfig.VERSION_UPDATER_FREQ,
                             'min': sickrage.srConfig.MIN_VERSION_UPDATER_FREQ}),
        name="VERSIONUPDATER",
        id="VERSIONUPDATER",
        replace_existing=True
    )

    # add network timezones updater job to scheduler
    self.SCHEDULER.add_job(
        update_network_dict,
        srIntervalTrigger(**{'days': 1}),
        name="TZUPDATER",
        id="TZUPDATER",
        replace_existing=True
    )

    # add namecache updater job to scheduler
    self.SCHEDULER.add_job(
        self.NAMECACHE.run,
        srIntervalTrigger(**{'minutes': sickrage.srConfig.NAMECACHE_FREQ,
                             'min': sickrage.srConfig.MIN_NAMECACHE_FREQ}),
        name="NAMECACHE",
        id="NAMECACHE",
        replace_existing=True
    )

    # add show queue job to scheduler
    self.SCHEDULER.add_job(
        self.SHOWQUEUE.run,
        srIntervalTrigger(**{'seconds': 3}),
        name="SHOWQUEUE",
        id="SHOWQUEUE",
        replace_existing=True
    )

    # add search queue job to scheduler
    self.SCHEDULER.add_job(
        self.SEARCHQUEUE.run,
        srIntervalTrigger(**{'seconds': 1}),
        name="SEARCHQUEUE",
        id="SEARCHQUEUE",
        replace_existing=True
    )

    # add show updater job to scheduler
    self.SCHEDULER.add_job(
        self.SHOWUPDATER.run,
        srIntervalTrigger(**{'hours': 1,
                             'start_date': datetime.now().replace(hour=sickrage.srConfig.SHOWUPDATE_HOUR)}),
        name="SHOWUPDATER",
        id="SHOWUPDATER",
        replace_existing=True
    )

    # add daily search job to scheduler
    self.SCHEDULER.add_job(
        self.DAILYSEARCHER.run,
        srIntervalTrigger(**{'minutes': sickrage.srConfig.DAILY_SEARCHER_FREQ,
                             'min': sickrage.srConfig.MIN_DAILY_SEARCHER_FREQ}),
        name="DAILYSEARCHER",
        id="DAILYSEARCHER",
        replace_existing=True
    )

    # add backlog search job to scheduler
    self.SCHEDULER.add_job(
        self.BACKLOGSEARCHER.run,
        srIntervalTrigger(**{'minutes': sickrage.srConfig.BACKLOG_SEARCHER_FREQ,
                             'min': sickrage.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
        name="BACKLOG",
        id="BACKLOG",
        replace_existing=True
    )

    # add auto-postprocessing job to scheduler
    job = self.SCHEDULER.add_job(
        self.AUTOPOSTPROCESSOR.run,
        srIntervalTrigger(**{'minutes': sickrage.srConfig.AUTOPOSTPROCESSOR_FREQ,
                             'min': sickrage.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
        name="POSTPROCESSOR",
        id="POSTPROCESSOR",
        replace_existing=True
    )
    (job.pause, job.resume)[sickrage.srConfig.PROCESS_AUTOMATICALLY]()

    # add find proper job to scheduler
    job = self.SCHEDULER.add_job(
        self.PROPERSEARCHER.run,
        srIntervalTrigger(**{
            'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                sickrage.srConfig.PROPER_SEARCHER_INTERVAL]}),
        name="PROPERSEARCHER",
        id="PROPERSEARCHER",
        replace_existing=True
    )
    (job.pause, job.resume)[sickrage.srConfig.DOWNLOAD_PROPERS]()

    # add trakt.tv checker job to scheduler
    job = self.SCHEDULER.add_job(
        self.TRAKTSEARCHER.run,
        srIntervalTrigger(**{'hours': 1}),
        name="TRAKTSEARCHER",
        id="TRAKTSEARCHER",
        replace_existing=True,
    )
    (job.pause, job.resume)[sickrage.srConfig.USE_TRAKT]()

    # add subtitles finder job to scheduler
    job = self.SCHEDULER.add_job(
        self.SUBTITLESEARCHER.run,
        srIntervalTrigger(**{'hours': sickrage.srConfig.SUBTITLE_SEARCHER_FREQ}),
        name="SUBTITLESEARCHER",
        id="SUBTITLESEARCHER",
        replace_existing=True
    )
    (job.pause, job.resume)[sickrage.srConfig.USE_SUBTITLES]()

    # start the scheduler
    self.SCHEDULER.start()
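# Note on the (job.pause, job.resume)[flag]() idiom used above: indexing the
# tuple with a boolean picks job.pause when the flag is False (0) and
# job.resume when it is True (1). An equivalent, more explicit sketch:
#
#   action = job.resume if sickrage.srConfig.PROCESS_AUTOMATICALLY else job.pause
#   action()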
def trimHistory():
    """Trims history table to 1 month of history from today"""
    failed_db.FailedDB().action(
        "DELETE FROM history WHERE date < " +
        (datetime.today() - timedelta(days=30)).strftime(History.date_format))
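# Note: history.date stores timestamps rendered with History.date_format
# (a sortable stamp such as %Y%m%d%H%M%S), so the "date <" comparison against
# today-minus-30-days in trimHistory prunes everything older than one month.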
def logSuccess(release):
    """Remove a release from the snatch history after a successful download"""
    release = FailedHistory.prepareFailedName(release)

    failed_db.FailedDB().action("DELETE FROM history WHERE release=?", [release])