def run(self, force=False):
    """Full daily update pass over every loaded show.

    Queues an indexer *update* for shows that should_update() approves (or
    that were hand-picked as stale 'Ended' shows), and a plain *refresh* for
    the rest. Guarded by ``self.amActive`` so overlapping runs are skipped.

    :param force: accepted for scheduler-interface compatibility; not read here
    """
    # re-entrancy guard: bail out if a previous run is still in flight
    if self.amActive:
        return
    self.amActive = True

    update_datetime = datetime.datetime.now()
    update_date = update_datetime.date()

    # trim the failed-download history while we're doing housekeeping
    if sickrage.srConfig.USE_FAILED_DOWNLOADS:
        FailedHistory.trimHistory()

    sickrage.srLogger.info("Doing full update on all shows")

    # select 10 'Ended' tv_shows updated more than 90 days ago to include in this update
    stale_should_update = []
    stale_update_date = (update_date - datetime.timedelta(days=90)).toordinal()

    # last_update_date <= 90 days, sorted ASC because dates are ordinal
    sql_result = main_db.MainDB().select(
        "SELECT indexer_id FROM tv_shows WHERE status = 'Ended' AND last_update_indexer <= ? ORDER BY last_update_indexer ASC LIMIT 10;",
        [stale_update_date])

    for cur_result in sql_result:
        stale_should_update.append(int(cur_result[b'indexer_id']))

    # start update process — collect the queue-item handles for the progress indicator
    piList = []
    for curShow in sickrage.srCore.SHOWLIST:
        try:
            # get next episode airdate (refreshes cached next-episode data)
            curShow.nextEpisode()

            # if should_update returns True (not 'Ended') or show is selected stale 'Ended' then update, otherwise just refresh
            if curShow.should_update(update_date=update_date) or curShow.indexerid in stale_should_update:
                try:
                    piList.append(
                        sickrage.srCore.SHOWQUEUE.updateShow(curShow, True))  # @UndefinedVariable
                except CantUpdateShowException as e:
                    # a single un-updatable show must not abort the whole pass
                    sickrage.srLogger.debug("Unable to update show: {0}".format(str(e)))
            else:
                sickrage.srLogger.debug(
                    "Not updating episodes for show " + curShow.name + " because it's marked as ended and last/next episode is not within the grace period.")
                piList.append(
                    sickrage.srCore.SHOWQUEUE.refreshShow(curShow, True))  # @UndefinedVariable
        except (CantUpdateShowException, CantRefreshShowException) as e:
            sickrage.srLogger.error("Automatic update failed: {}".format(e.message))

    # expose aggregate progress of all queued update/refresh items
    ProgressIndicators.setIndicator('dailyUpdate', QueueProgressIndicator("Daily Update", piList))

    sickrage.srLogger.info("Completed full update on all shows")

    self.amActive = False
def run(self):
    """Retry a failed download for each episode in ``self.segment``.

    Marks each episode as failed, reverts its status, then searches all
    providers for replacement releases and snatches what is found.
    Any exception is logged and swallowed so the queue keeps running.
    """
    QueueItem.run(self)
    self.started = True

    try:
        for epObj in self.segment:
            sickrage.srLogger.info("Marking episode as bad: [" + epObj.prettyName() + "]")

            # record the failure and revert the episode to its pre-snatch status
            FailedHistory.markFailed(epObj)

            (release, provider) = FailedHistory.findFailedRelease(epObj)
            if release:
                FailedHistory.logFailed(release)
                History.logFailed(epObj, release, provider)

            FailedHistory.revertFailedEpisode(epObj)
            sickrage.srLogger.info(
                "Beginning failed download search for: [" + epObj.prettyName() + "]")

        # If it is wanted, self.downCurQuality doesnt matter
        # if it isnt wanted, we need to make sure to not overwrite the existing ep that we reverted to!
        searchResult = searchProviders(self.show, self.segment, True, False)

        if searchResult:
            for result in searchResult:
                # just use the first result for now
                sickrage.srLogger.info("Downloading " + result.name + " from " + result.provider.name)
                snatchEpisode(result)

                # give the CPU a break
                time.sleep(cpu_presets[sickrage.srConfig.CPU_PRESET])
        else:
            pass
            # LOGGER.info(u"No valid episode found to retry for: [" + self.segment.prettyName() + "]")
    except Exception:
        # best-effort: log the traceback but never let one failed retry kill the queue
        sickrage.srLogger.debug(traceback.format_exc())

    # Keep a list with the 100 last executed searches
    fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)

    if self.success is None:
        self.success = False

    # FIX: mark the queue item as finished — the sibling retry implementation in
    # this file calls self.finish() here; without it this item is never completed.
    self.finish()
def run(self):
    """Failed-download retry pass for every episode in ``self.segment``.

    Each episode is flagged as failed and reverted, then the providers are
    searched for replacement releases which are snatched one by one. All
    exceptions are logged and suppressed so the queue stays alive.
    """
    QueueItem.run(self)
    self.started = True

    try:
        for ep in self.segment:
            sickrage.srLogger.info("Marking episode as bad: [" + ep.prettyName() + "]")
            FailedHistory.markFailed(ep)

            release, provider = FailedHistory.findFailedRelease(ep)
            if release:
                FailedHistory.logFailed(release)
                History.logFailed(ep, release, provider)

            FailedHistory.revertFailedEpisode(ep)
            sickrage.srLogger.info("Beginning failed download search for: [" + ep.prettyName() + "]")

        # If it is wanted, self.downCurQuality doesnt matter
        # if it isnt wanted, we need to make sure to not overwrite the existing ep that we reverted to!
        found = searchProviders(self.show, self.segment, True, False)

        if not found:
            pass
            # LOGGER.info(u"No valid episode found to retry for: [" + self.segment.prettyName() + "]")
        else:
            for candidate in found:
                # just use the first result for now
                sickrage.srLogger.info("Downloading " + candidate.name + " from " + candidate.provider.name)
                snatchEpisode(candidate)

                # give the CPU a break
                gen.sleep(cpu_presets[sickrage.srConfig.CPU_PRESET])
    except Exception:
        sickrage.srLogger.debug(traceback.format_exc())

    # Keep a list with the 100 last executed searches
    fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE)

    if self.success is None:
        self.success = False

    self.finish()
def snatchEpisode(result, endStatus=SNATCHED):
    """
    Contains the internal logic necessary to actually "snatch" a result that has been found.

    :param result: SearchResult instance to be snatched.
    :param endStatus: the episode status that should be used for the episode object once it's snatched.
    :return: boolean, True on success
    """
    if result is None:
        return False

    result.priority = 0  # -1 = low, 0 = normal, 1 = high
    if sickrage.srConfig.ALLOW_HIGH_PRIORITY:
        # if it aired recently make it high priority
        for curEp in result.episodes:
            if date.today() - curEp.airdate <= timedelta(days=7):
                result.priority = 1

    # PROPER/REPACK releases replace an earlier snatch, so record a distinct status
    if re.search(r'(^|[\. _-])(proper|repack)([\. _-]|$)', result.name, re.I) is not None:
        endStatus = SNATCHED_PROPER

    # normalize result type from the URL when it clearly points at a torrent
    if result.url.startswith('magnet') or result.url.endswith('torrent'):
        result.resultType = 'torrent'

    # NZBs can be sent straight to SAB or saved to disk
    if result.resultType in ("nzb", "nzbdata"):
        if sickrage.srConfig.NZB_METHOD == "blackhole":
            dlResult = _downloadResult(result)
        elif sickrage.srConfig.NZB_METHOD == "sabnzbd":
            dlResult = SabNZBd.sendNZB(result)
        elif sickrage.srConfig.NZB_METHOD == "nzbget":
            is_proper = True if endStatus == SNATCHED_PROPER else False
            dlResult = NZBGet.sendNZB(result, is_proper)
        else:
            sickrage.srLogger.error("Unknown NZB action specified in config: " + sickrage.srConfig.NZB_METHOD)
            dlResult = False

    # TORRENTs can be sent to clients or saved to disk
    elif result.resultType == "torrent":
        # torrents are saved to disk when blackhole mode
        if sickrage.srConfig.TORRENT_METHOD == "blackhole":
            dlResult = _downloadResult(result)
        else:
            # magnet links carry no payload; everything else must be fetched first
            if not result.content and not result.url.startswith('magnet'):
                result.content = result.provider.getURL(result.url, needBytes=True)

            if result.content or result.url.startswith('magnet'):
                client = getClientIstance(sickrage.srConfig.TORRENT_METHOD)()
                dlResult = client.sendTORRENT(result)
            else:
                sickrage.srLogger.warning("Torrent file content is empty")
                dlResult = False
    else:
        sickrage.srLogger.error("Unknown result type, unable to download it (%r)" % result.resultType)
        dlResult = False

    if not dlResult:
        return False

    if sickrage.srConfig.USE_FAILED_DOWNLOADS:
        FailedHistory.logSnatch(result)

    notifications.message('Episode snatched', result.name)

    History.logSnatch(result)

    # don't notify when we re-download an episode
    sql_l = []
    trakt_data = []
    for curEpObj in result.episodes:
        with curEpObj.lock:
            if isFirstBestMatch(result):
                curEpObj.status = Quality.compositeStatus(SNATCHED_BEST, result.quality)
            else:
                curEpObj.status = Quality.compositeStatus(endStatus, result.quality)

            sql_q = curEpObj.saveToDB(False)
            if sql_q:
                sql_l.append(sql_q)
            del sql_q  # cleanup

        if curEpObj.status not in Quality.DOWNLOADED:
            try:
                srNotifiers.notify_snatch(
                    curEpObj._format_pattern('%SN - %Sx%0E - %EN - %QN') + " from " + result.provider.name)
            except:
                # notification failures are non-fatal by design
                sickrage.srLogger.debug("Failed to send snatch notification")

            trakt_data.append((curEpObj.season, curEpObj.episode))

    # NOTE(review): trakt payload is built even when trakt sync is disabled below — confirm
    # whether trakt_episode_data_generate is cheap/side-effect free before hoisting it.
    data = sickrage.srCore.NOTIFIERS.trakt_notifier.trakt_episode_data_generate(trakt_data)

    if sickrage.srConfig.USE_TRAKT and sickrage.srConfig.TRAKT_SYNC_WATCHLIST:
        sickrage.srLogger.debug("Add episodes, showid: indexerid " + str(result.show.indexerid) + ", Title " + str(
            result.show.name) + " to Traktv Watchlist")
        if data:
            sickrage.srCore.NOTIFIERS.trakt_notifier.update_watchlist(result.show, data_episode=data, update="add")

    # flush all per-episode status updates in one batch
    if len(sql_l) > 0:
        main_db.MainDB().mass_upsert(sql_l)
        del sql_l  # cleanup

    return True
def pickBestResult(results, show):
    """
    Find the best result out of a list of search results for a show

    :param results: list of result objects
    :param show: Shows we check for
    :return: best result object
    """
    # accept a single result as well as a list
    results = results if isinstance(results, list) else [results]

    sickrage.srLogger.debug("Picking the best result out of " + str([x.name for x in results]))

    bestResult = None

    # find the best result for the current episode
    for cur_result in results:
        # results belonging to a different show are skipped outright
        if show and cur_result.show is not show:
            continue

        # build the black And white list
        if show.is_anime:
            if not show.release_groups.is_valid(cur_result):
                continue

        sickrage.srLogger.info("Quality of " + cur_result.name + " is " + Quality.qualityStrings[cur_result.quality])

        anyQualities, bestQualities = Quality.splitQuality(show.quality)

        if cur_result.quality not in anyQualities + bestQualities:
            sickrage.srLogger.debug(cur_result.name + " is a quality we know we don't want, rejecting it")
            continue

        # NOTE(review): these filters mix `show.rls_*` and `cur_result.show.rls_*`;
        # equivalent here because the identity check above guarantees they are the
        # same object, but worth unifying for clarity.
        if show.rls_ignore_words and show_names.containsAtLeastOneWord(cur_result.name,
                                                                       cur_result.show.rls_ignore_words):
            sickrage.srLogger.info(
                "Ignoring " + cur_result.name + " based on ignored words filter: " + show.rls_ignore_words)
            continue

        if show.rls_require_words and not show_names.containsAtLeastOneWord(cur_result.name,
                                                                            cur_result.show.rls_require_words):
            sickrage.srLogger.info(
                "Ignoring " + cur_result.name + " based on required words filter: " + show.rls_require_words)
            continue

        if not show_names.filterBadReleases(cur_result.name, parse=False):
            sickrage.srLogger.info(
                "Ignoring " + cur_result.name + " because its not a valid scene release that we want, ignoring it")
            continue

        # skip releases that previously failed for the same name/size/provider
        if hasattr(cur_result, 'size'):
            if sickrage.srConfig.USE_FAILED_DOWNLOADS and FailedHistory.hasFailed(cur_result.name, cur_result.size,
                                                                                  cur_result.provider.name):
                sickrage.srLogger.info(cur_result.name + " has previously failed, rejecting it")
                continue

        # preference cascade — order matters:
        #   1. first surviving candidate wins by default
        #   2. a "best"-list quality beats a lower or non-"best" quality
        #   3. within the "any" list, higher quality wins (unless current best is in "best")
        #   4. at equal quality: proper/repack > non-internal > x264-over-xvid
        if not bestResult:
            bestResult = cur_result
        elif cur_result.quality in bestQualities and (
                bestResult.quality < cur_result.quality or bestResult.quality not in bestQualities):
            bestResult = cur_result
        elif cur_result.quality in anyQualities and bestResult.quality not in bestQualities and bestResult.quality < cur_result.quality:
            bestResult = cur_result
        elif bestResult.quality == cur_result.quality:
            if "proper" in cur_result.name.lower() or "repack" in cur_result.name.lower():
                bestResult = cur_result
            elif "internal" in bestResult.name.lower() and "internal" not in cur_result.name.lower():
                bestResult = cur_result
            elif "xvid" in bestResult.name.lower() and "x264" in cur_result.name.lower():
                sickrage.srLogger.info("Preferring " + cur_result.name + " (x264 over xvid)")
                bestResult = cur_result

    if bestResult:
        sickrage.srLogger.debug("Picked " + bestResult.name + " as the best")
    else:
        sickrage.srLogger.debug("No result picked.")

    return bestResult
def run(self, force=False):
    """
    Runs the daily searcher, queuing selected episodes for search

    Flips UNAIRED episodes whose air time has passed to the show's default
    status (or SKIPPED for specials), batches the DB writes, then queues a
    DailySearchQueueItem. Guarded by ``self.amActive`` against overlap.

    :param force: Force search
    """
    if self.amActive:
        return
    self.amActive = True

    # trim failed download history
    if sickrage.srConfig.USE_FAILED_DOWNLOADS:
        FailedHistory.trimHistory()

    sickrage.srLogger.info("Searching for new released episodes ...")

    # look one day ahead when network timezone data is available, two otherwise
    if tz_updater.load_network_dict():
        curDate = (date.today() + timedelta(days=1)).toordinal()
    else:
        curDate = (date.today() + timedelta(days=2)).toordinal()

    curTime = datetime.now(tz_updater.sr_timezone)

    sqlResults = main_db.MainDB().select(
        "SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND (airdate <= ? AND airdate > 1)",
        [UNAIRED, curDate])

    sql_l = []
    show = None

    for sqlEp in sqlResults:
        try:
            # results are grouped by show, so only re-resolve when the id changes
            if not show or int(sqlEp[b"showid"]) != show.indexerid:
                show = findCertainShow(sickrage.srCore.SHOWLIST, int(sqlEp[b"showid"]))

            # for when there is orphaned series in the database but not loaded into our showlist
            if not show or show.paused:
                continue
        except MultipleShowObjectsException:
            sickrage.srLogger.info("ERROR: expected to find a single show matching " + str(sqlEp[b'showid']))
            continue

        if show.airs and show.network:
            # This is how you assure it is always converted to local time
            air_time = tz_updater.parse_date_time(sqlEp[b'airdate'], show.airs, show.network).astimezone(
                tz_updater.sr_timezone)

            # filter out any episodes that haven't started airing yet,
            # but set them to the default status while they are airing
            # so they are snatched faster
            if air_time > curTime:
                continue

        ep = show.getEpisode(int(sqlEp[b"season"]), int(sqlEp[b"episode"]))
        with ep.lock:
            if ep.season == 0:
                sickrage.srLogger.info(
                    "New episode " + ep.prettyName() + " airs today, setting status to SKIPPED because is a special season")
                ep.status = SKIPPED
            else:
                sickrage.srLogger.info(
                    "New episode %s airs today, setting to default episode status for this show: %s" % (
                        ep.prettyName(), statusStrings[ep.show.default_ep_status]))
                ep.status = ep.show.default_ep_status

            sql_q = ep.saveToDB(False)
            if sql_q:
                sql_l.append(sql_q)
            del sql_q

    # flush all status changes in a single batch; empty batch means nothing aired
    if len(sql_l) > 0:
        main_db.MainDB().mass_upsert(sql_l)
        del sql_l
    else:
        sickrage.srLogger.info("No new released episodes found ...")

    # queue episode for daily search
    sickrage.srCore.SEARCHQUEUE.add_item(DailySearchQueueItem())

    self.amActive = False