Example 1
    def findFailedRelease(epObj):
        """
        Find releases in history by show ID and season.
        Return None for release if multiple found or no release found.
        """

        release = None
        provider = None

        # Clear old snatches for this release if any exist, keeping only the most recent one
        failed_db.FailedDB().action(
            "DELETE FROM history WHERE showid=? AND season=? AND episode=? "
            "AND date < (SELECT max(date) FROM history WHERE showid=? AND season=? AND episode=?)",
            [epObj.show.indexerid, epObj.season, epObj.episode,
             epObj.show.indexerid, epObj.season, epObj.episode])

        # Search for release in snatch history
        results = failed_db.FailedDB().select(
            "SELECT release, provider, date FROM history WHERE showid=? AND season=? AND episode=?",
            [epObj.show.indexerid, epObj.season, epObj.episode])

        for result in results:
            release = str(result["release"])
            provider = str(result["provider"])
            date = result["date"]

            # Clear any incomplete snatch records for this release if any exist
            failed_db.FailedDB().action("DELETE FROM history WHERE release=? AND date!=?", [release, date])

            # Found a previously failed release
            sickrage.srCore.srLogger.debug("Failed release found for season (%s): (%s)" % (epObj.season, result["release"]))
            return (release, provider)

        # Release was not found
        sickrage.srCore.srLogger.debug("No releases found for season (%s) of (%s)" % (epObj.season, epObj.show.indexerid))
        return (release, provider)
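
Assuming these snippets are static methods of a FailedHistory class (later examples call FailedHistory.prepareFailedName, which points that way), a hypothetical caller unpacks the returned tuple and checks for None:

    # hypothetical usage; epObj is an episode object carrying .show, .season and .episode
    release, provider = FailedHistory.findFailedRelease(epObj)
    if release is not None:
        sickrage.srCore.srLogger.info("Found failed release %s from %s" % (release, provider))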
Example 2
def setUp_test_db(force=False):
    """upgrades the db to the latest version
    """

    global TESTDB_INITALIZED

    if not TESTDB_INITALIZED or force:
        # remove old db files
        tearDown_test_db()

        # upgrade main
        main_db.MainDB().InitialSchema().upgrade()

        # sanity check main
        main_db.MainDB().SanityCheck()

        # upgrade cache
        cache_db.CacheDB().InitialSchema().upgrade()

        # upgrade failed
        failed_db.FailedDB().InitialSchema().upgrade()

        # populate scene exceptions table
        # retrieve_exceptions(False, False)

        TESTDB_INITALIZED = True
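
tearDown_test_db() is referenced but not shown. A minimal sketch of what it plausibly does, assuming the test databases are plain files in the working directory (file names here are hypothetical):

    import os

    def tearDown_test_db():
        # hypothetical cleanup: delete leftover test database files
        for filename in ('sickrage.db', 'cache.db', 'failed.db'):  # assumed names
            if os.path.exists(filename):
                os.remove(filename)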
Example 3
    def logSnatch(searchResult):
        """
        Logs a successful snatch

        :param searchResult: Search result that was successful
        """
        logDate = datetime.datetime.today().strftime(History.date_format)
        release = FailedHistory.prepareFailedName(searchResult.name)

        providerClass = searchResult.provider
        if providerClass is not None:
            provider = providerClass.name
        else:
            provider = "unknown"

        show_obj = searchResult.episodes[0].show

        for episode in searchResult.episodes:
            failed_db.FailedDB().action(
                "INSERT INTO history (date, size, release, provider, showid, season, episode, old_status)"
                "VALUES (?, ?, ?, ?, ?, ?, ?, ?)", [
                    logDate, searchResult.size, release, provider,
                    show_obj.indexerid, episode.season, episode.episode,
                    episode.status
                ])
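
The loop issues one parameterized INSERT per episode, so every snatched episode gets its own history row sharing the same date, size, release and provider. If FailedDB wraps a sqlite3 connection (an assumption; only its action/select helpers appear in these examples), the same write could be batched with executemany:

    # hypothetical batched equivalent, reusing the locals from logSnatch above;
    # 'connection' is an assumed sqlite3.Connection
    rows = [(logDate, searchResult.size, release, provider,
             show_obj.indexerid, ep.season, ep.episode, ep.status)
            for ep in searchResult.episodes]
    connection.executemany(
        "INSERT INTO history (date, size, release, provider, showid, season, episode, old_status) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?)", rows)
    connection.commit()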
Example 4
    def revertFailedEpisode(epObj):
        """Restore the episodes of a failed download to their original state"""
        sql_results = failed_db.FailedDB().select(
            "SELECT * FROM history WHERE showid=? AND season=?",
            [epObj.show.indexerid, epObj.season])

        history_eps = {res[b"episode"]: res for res in sql_results}

        try:
            sickrage.LOGGER.info("Reverting episode (%s, %s): %s" %
                                 (epObj.season, epObj.episode, epObj.name))
            with epObj.lock:
                if epObj.episode in history_eps:
                    sickrage.LOGGER.info("Found in history")
                    epObj.status = history_eps[epObj.episode][b'old_status']
                else:
                    sickrage.LOGGER.warning(
                        "WARNING: Episode not found in history. Setting it back to WANTED"
                    )
                    epObj.status = WANTED

                # persist the restored status in either case
                epObj.saveToDB()

        except EpisodeNotFoundException as e:
            sickrage.LOGGER.warning(
                "Unable to create episode, please set its status manually: {}".
                format(e))
Example 5
    def logFailed(release):
        log_str = ""
        size = -1
        provider = ""

        release = FailedHistory.prepareFailedName(release)

        sql_results = failed_db.FailedDB().select(
            "SELECT * FROM history WHERE release=?", [release])

        if len(sql_results) == 0:
            sickrage.LOGGER.warning("Release not found in snatch history.")
        elif len(sql_results) > 1:
            sickrage.LOGGER.warning(
                "Multiple logged snatches found for release")
            sizes = len(set(x[b"size"] for x in sql_results))
            providers = len(set(x[b"provider"] for x in sql_results))
            if sizes == 1:
                sickrage.LOGGER.warning(
                    "However, they're all the same size. Continuing with found size."
                )
                size = sql_results[0][b"size"]
            else:
                sickrage.LOGGER.warning(
                    "They also vary in size. Deleting the logged snatches and recording this release with no size/provider"
                )
                for result in sql_results:
                    FailedHistory.deleteLoggedSnatch(result[b"release"],
                                                     result[b"size"],
                                                     result[b"provider"])

            if providers == 1:
                sickrage.LOGGER.info(
                    "They're also from the same provider. Using it as well.")
                provider = sql_results[0][b"provider"]
        else:
            size = sql_results[0][b"size"]
            provider = sql_results[0][b"provider"]

        if not FailedHistory.hasFailed(release, size, provider):
            failed_db.FailedDB().action(
                "INSERT INTO failed (release, size, provider) VALUES (?, ?, ?)",
                [release, size, provider])

        FailedHistory.deleteLoggedSnatch(release, size, provider)

        return log_str
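
The multiple-result branch counts distinct sizes and providers with sets. A tiny standalone illustration of those two checks:

    # two logged snatches that agree on size but not on provider
    sql_results = [{"size": 1073741824, "provider": "providerA"},
                   {"size": 1073741824, "provider": "providerB"}]
    sizes = len(set(x["size"] for x in sql_results))          # 1 -> size is trustworthy
    providers = len(set(x["provider"] for x in sql_results))  # 2 -> provider stays unknown
    print(sizes, providers)  # 1 2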
Example 6
    def deleteLoggedSnatch(release, size, provider):
        """
        Remove a snatch from history

        :param release: Release to delete
        :param size: Size of release
        :param provider: Provider to delete it from
        """
        release = FailedHistory.prepareFailedName(release)

        failed_db.FailedDB().action(
            "DELETE FROM history WHERE release=? AND size=? AND provider=?",
            [release, size, provider])
Example 7
    def hasFailed(release, size, provider="%"):
        """
        Returns True if a release has previously failed.

        If provider is given, return True only if the release is found
        with that specific provider. Otherwise, return True if the release
        is found with any provider.

        :param release: Release name to check
        :param size: Size of release
        :param provider: Specific provider to search (defaults to all providers)
        :return: True if a release has previously failed.
        """

        release = FailedHistory.prepareFailedName(release)

        sql_results = failed_db.FailedDB().select(
            "SELECT * FROM failed WHERE release=? AND size=? AND provider LIKE ?",
            [release, size, provider])

        return len(sql_results) > 0
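
The provider="%" default leans on SQL LIKE semantics: the % wildcard matches any provider string, so calling hasFailed without a provider searches across all providers. A minimal standalone sketch with the standard-library sqlite3 module (table contents are made up for illustration):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE failed (release TEXT, size INTEGER, provider TEXT)")
    conn.execute("INSERT INTO failed VALUES (?, ?, ?)",
                 ("Some.Show.S01E01.720p", 1073741824, "providerA"))

    # provider LIKE '%' matches any provider, mirroring hasFailed's default
    rows = conn.execute("SELECT * FROM failed WHERE release=? AND size=? AND provider LIKE ?",
                        ("Some.Show.S01E01.720p", 1073741824, "%")).fetchall()
    print(len(rows) > 0)  # True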
Example 8
def setUp_test_db():
    """upgrades the db to the latest version
    """

    global TESTDB_INITALIZED

    if not TESTDB_INITALIZED:
        # remove old db files
        tearDown_test_db()

        # upgrading the db
        main_db.MainDB().InitialSchema().upgrade()

        # fix up any db problems
        main_db.MainDB().SanityCheck()

        # and for cachedb too
        cache_db.CacheDB().InitialSchema().upgrade()

        # and for faileddb too
        failed_db.FailedDB().InitialSchema().upgrade()

        TESTDB_INITALIZED = True
Example 9
    def trimHistory():
        """Trims the history table to the last 30 days of history from today"""
        failed_db.FailedDB().action(
            "DELETE FROM history WHERE date < ?",
            # date stamps are all-numeric (History.date_format), so bind as an integer
            [int((datetime.today() - timedelta(days=30)).strftime(History.date_format))])
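
A quick standalone check of the cutoff value, assuming History.date_format is a compact numeric timestamp such as '%Y%m%d%H%M%S' (an assumption; the constant is defined elsewhere):

    from datetime import datetime, timedelta

    date_format = '%Y%m%d%H%M%S'  # assumed value of History.date_format
    cutoff = (datetime.today() - timedelta(days=30)).strftime(date_format)
    print(cutoff)  # e.g. 20160115093000; rows with date < this are deleted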
Example 10
    def logSuccess(release):
        release = FailedHistory.prepareFailedName(release)

        failed_db.FailedDB().action("DELETE FROM history WHERE release=?", [release])
Example 11
def initialize():
    if not sickrage.INITIALIZED:
        with threading.Lock():
            # init encoding
            encodingInit()

            # Check if we need to perform a restore first
            os.chdir(sickrage.DATA_DIR)
            restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
            if os.path.exists(restore_dir):
                success = restoreDB(restore_dir, sickrage.DATA_DIR)
                sickrage.LOGGER.info(
                    "Restore: restoring DB and config.ini %s!\n" %
                    ("FAILED", "SUCCESSFUL")[success])

            # init indexerApi
            sickrage.INDEXER_API = indexerApi

            # initialize notifiers
            sickrage.NOTIFIERS = AttrDict(
                libnotify=LibnotifyNotifier(),
                kodi_notifier=KODINotifier(),
                plex_notifier=PLEXNotifier(),
                emby_notifier=EMBYNotifier(),
                nmj_notifier=NMJNotifier(),
                nmjv2_notifier=NMJv2Notifier(),
                synoindex_notifier=synoIndexNotifier(),
                synology_notifier=synologyNotifier(),
                pytivo_notifier=pyTivoNotifier(),
                growl_notifier=GrowlNotifier(),
                prowl_notifier=ProwlNotifier(),
                libnotify_notifier=LibnotifyNotifier(),
                pushover_notifier=PushoverNotifier(),
                boxcar_notifier=BoxcarNotifier(),
                boxcar2_notifier=Boxcar2Notifier(),
                nma_notifier=NMA_Notifier(),
                pushalot_notifier=PushalotNotifier(),
                pushbullet_notifier=PushbulletNotifier(),
                freemobile_notifier=FreeMobileNotifier(),
                twitter_notifier=TwitterNotifier(),
                trakt_notifier=TraktNotifier(),
                email_notifier=EmailNotifier(),
            )

            sickrage.NAMING_EP_TYPE = (
                "%(seasonnumber)dx%(episodenumber)02d",
                "s%(seasonnumber)02de%(episodenumber)02d",
                "S%(seasonnumber)02dE%(episodenumber)02d",
                "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.SPORTS_EP_TYPE = (
                "%(seasonnumber)dx%(episodenumber)02d",
                "s%(seasonnumber)02de%(episodenumber)02d",
                "S%(seasonnumber)02dE%(episodenumber)02d",
                "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.NAMING_EP_TYPE_TEXT = ("1x02", "s01e02", "S01E02",
                                            "01x02")
            sickrage.NAMING_MULTI_EP_TYPE = {
                0: ["-%(episodenumber)02d"] * len(sickrage.NAMING_EP_TYPE),
                1: [" - " + x for x in sickrage.NAMING_EP_TYPE],
                2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
            }

            sickrage.NAMING_MULTI_EP_TYPE_TEXT = ("extend", "duplicate",
                                                  "repeat")
            sickrage.NAMING_SEP_TYPE = (" - ", " ")
            sickrage.NAMING_SEP_TYPE_TEXT = (" - ", "space")

            # migrate old database filenames to new ones
            if not os.path.exists(main_db.MainDB().filename
                                  ) and os.path.exists("sickbeard.db"):
                helpers.moveFile("sickbeard.db", main_db.MainDB().filename)

            # init config file
            srConfig.load_config(sickrage.CONFIG_FILE, True)

            # set socket timeout
            socket.setdefaulttimeout(sickrage.SOCKET_TIMEOUT)

            # init logger
            sickrage.LOGGER = sickrage.LOGGER.__class__(
                logFile=sickrage.LOG_FILE,
                logSize=sickrage.LOG_SIZE,
                logNr=sickrage.LOG_NR,
                fileLogging=makeDir(sickrage.LOG_DIR),
                debugLogging=sickrage.DEBUG)

            # init updater and get current version
            sickrage.VERSIONUPDATER = VersionUpdater()
            sickrage.VERSION = sickrage.VERSIONUPDATER.updater.get_cur_version

            # initialize the main SB database
            main_db.MainDB().InitialSchema().upgrade()

            # initialize the cache database
            cache_db.CacheDB().InitialSchema().upgrade()

            # initialize the failed downloads database
            failed_db.FailedDB().InitialSchema().upgrade()

            # fix up any db problems
            main_db.MainDB().SanityCheck()

            if sickrage.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                             'news', 'IRC'):
                sickrage.DEFAULT_PAGE = 'home'

            if not makeDir(sickrage.CACHE_DIR):
                sickrage.LOGGER.error("!!! Creating local cache dir failed")
                sickrage.CACHE_DIR = None

            # Check if we need to perform a restore of the cache folder
            try:
                restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
                if os.path.exists(restore_dir) and os.path.exists(
                        os.path.join(restore_dir, 'cache')):

                    def restore_cache(srcdir, dstdir):
                        def path_leaf(path):
                            head, tail = os.path.split(path)
                            return tail or os.path.basename(head)

                        try:
                            if os.path.isdir(dstdir):
                                bakfilename = '{}-{}'.format(
                                    path_leaf(dstdir),
                                    datetime.datetime.now().strftime(
                                        '%Y%m%d_%H%M%S'))
                                shutil.move(
                                    dstdir,
                                    os.path.join(os.path.dirname(dstdir),
                                                 bakfilename))

                            shutil.move(srcdir, dstdir)
                            sickrage.LOGGER.info(
                                "Restore: restoring cache successful")
                        except Exception as E:
                            sickrage.LOGGER.error(
                                "Restore: restoring cache failed: {}".format(
                                    E))

                    restore_cache(os.path.join(restore_dir, 'cache'),
                                  sickrage.CACHE_DIR)
            except Exception as e:
                sickrage.LOGGER.error(
                    "Restore: restoring cache failed: {}".format(e))
            finally:
                if os.path.exists(os.path.join(sickrage.DATA_DIR, 'restore')):
                    try:
                        removetree(os.path.join(sickrage.DATA_DIR, 'restore'))
                    except Exception as e:
                        sickrage.LOGGER.error(
                            "Restore: Unable to remove the restore directory: {}"
                            .format(e))

                    for cleanupDir in ['mako', 'sessions', 'indexers']:
                        try:
                            removetree(
                                os.path.join(sickrage.CACHE_DIR, cleanupDir))
                        except Exception as e:
                            sickrage.LOGGER.warning(
                                "Restore: Unable to remove the cache/{} directory: {1}"
                                .format(cleanupDir, e))

            if sickrage.WEB_PORT < 21 or sickrage.WEB_PORT > 65535:
                sickrage.WEB_PORT = 8081

            if not sickrage.WEB_COOKIE_SECRET:
                sickrage.WEB_COOKIE_SECRET = generateCookieSecret()

            # attempt to help prevent users from breaking links by using a bad url
            if not sickrage.ANON_REDIRECT.endswith('?'):
                sickrage.ANON_REDIRECT = ''

            if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', sickrage.ROOT_DIRS):
                sickrage.ROOT_DIRS = ''

            sickrage.NAMING_FORCE_FOLDERS = check_force_season_folders()
            if sickrage.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
                sickrage.NZB_METHOD = 'blackhole'

            if not sickrage.PROVIDER_ORDER:
                sickrage.PROVIDER_ORDER = list(sickrage.providersDict[GenericProvider.NZB]) + \
                                          list(sickrage.providersDict[GenericProvider.TORRENT])

            if sickrage.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                               'transmission', 'deluge',
                                               'deluged', 'download_station',
                                               'rtorrent', 'qbittorrent',
                                               'mlnet'):
                sickrage.TORRENT_METHOD = 'blackhole'

            if sickrage.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                         '4h', 'daily'):
                sickrage.PROPER_SEARCHER_INTERVAL = 'daily'

            if sickrage.AUTOPOSTPROCESSOR_FREQ < sickrage.MIN_AUTOPOSTPROCESSOR_FREQ:
                sickrage.AUTOPOSTPROCESSOR_FREQ = sickrage.MIN_AUTOPOSTPROCESSOR_FREQ

            if sickrage.NAMECACHE_FREQ < sickrage.MIN_NAMECACHE_FREQ:
                sickrage.NAMECACHE_FREQ = sickrage.MIN_NAMECACHE_FREQ

            if sickrage.DAILY_SEARCHER_FREQ < sickrage.MIN_DAILY_SEARCHER_FREQ:
                sickrage.DAILY_SEARCHER_FREQ = sickrage.MIN_DAILY_SEARCHER_FREQ

            sickrage.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
            if sickrage.BACKLOG_SEARCHER_FREQ < sickrage.MIN_BACKLOG_SEARCHER_FREQ:
                sickrage.BACKLOG_SEARCHER_FREQ = sickrage.MIN_BACKLOG_SEARCHER_FREQ

            if sickrage.VERSION_UPDATER_FREQ < sickrage.MIN_VERSION_UPDATER_FREQ:
                sickrage.VERSION_UPDATER_FREQ = sickrage.MIN_VERSION_UPDATER_FREQ

            if sickrage.SHOWUPDATE_HOUR > 23:
                sickrage.SHOWUPDATE_HOUR = 0
            elif sickrage.SHOWUPDATE_HOUR < 0:
                sickrage.SHOWUPDATE_HOUR = 0

            if sickrage.SUBTITLE_SEARCHER_FREQ < sickrage.MIN_SUBTITLE_SEARCHER_FREQ:
                sickrage.SUBTITLE_SEARCHER_FREQ = sickrage.MIN_SUBTITLE_SEARCHER_FREQ

            sickrage.NEWS_LATEST = sickrage.NEWS_LAST_READ

            if sickrage.SUBTITLES_LANGUAGES[0] == '':
                sickrage.SUBTITLES_LANGUAGES = []

            sickrage.TIME_PRESET = sickrage.TIME_PRESET_W_SECONDS.replace(
                ":%S", "")

            # initialize metadata_providers
            sickrage.metadataProvideDict = get_metadata_generator_dict()
            for cur_metadata_tuple in [
                (sickrage.METADATA_KODI, kodi),
                (sickrage.METADATA_KODI_12PLUS, kodi_12plus),
                (sickrage.METADATA_MEDIABROWSER, mediabrowser),
                (sickrage.METADATA_PS3, ps3), (sickrage.METADATA_WDTV, wdtv),
                (sickrage.METADATA_TIVO, tivo),
                (sickrage.METADATA_MEDE8ER, mede8er)
            ]:
                (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
                tmp_provider = cur_metadata_class.metadata_class()
                tmp_provider.set_config(cur_metadata_config)

                sickrage.metadataProvideDict[tmp_provider.name] = tmp_provider

            # init caches
            sickrage.NAMECACHE = nameCache()

            # init queues
            sickrage.SHOWUPDATER = ShowUpdater()
            sickrage.SHOWQUEUE = ShowQueue()
            sickrage.SEARCHQUEUE = SearchQueue()

            # load data for shows from database
            sickrage.showList = load_shows()

            # init searchers
            sickrage.DAILYSEARCHER = DailySearcher()
            sickrage.BACKLOGSEARCHER = BacklogSearcher()
            sickrage.PROPERSEARCHER = ProperSearcher()
            sickrage.TRAKTSEARCHER = TraktSearcher()
            sickrage.SUBTITLESEARCHER = SubtitleSearcher()

            # init scheduler
            sickrage.Scheduler = Scheduler()

            # add version checker job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.VERSIONUPDATER.run,
                SRIntervalTrigger(
                    **{
                        'hours': sickrage.VERSION_UPDATER_FREQ,
                        'min': sickrage.MIN_VERSION_UPDATER_FREQ
                    }),
                name="VERSIONUPDATER",
                id="VERSIONUPDATER",
                replace_existing=True)

            # add network timezones updater job to scheduler
            sickrage.Scheduler.add_job(update_network_dict,
                                       SRIntervalTrigger(**{'days': 1}),
                                       name="TZUPDATER",
                                       id="TZUPDATER",
                                       replace_existing=True)

            # add namecache updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.NAMECACHE.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.NAMECACHE_FREQ,
                        'min': sickrage.MIN_NAMECACHE_FREQ
                    }),
                name="NAMECACHE",
                id="NAMECACHE",
                replace_existing=True)

            # add show queue job to scheduler
            sickrage.Scheduler.add_job(sickrage.SHOWQUEUE.run,
                                       SRIntervalTrigger(**{'seconds': 3}),
                                       name="SHOWQUEUE",
                                       id="SHOWQUEUE",
                                       replace_existing=True)

            # add search queue job to scheduler
            sickrage.Scheduler.add_job(sickrage.SEARCHQUEUE.run,
                                       SRIntervalTrigger(**{'seconds': 1}),
                                       name="SEARCHQUEUE",
                                       id="SEARCHQUEUE",
                                       replace_existing=True)

            # add show updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SHOWUPDATER.run,
                SRIntervalTrigger(
                    **{
                        'hours': 1,
                        'start_date': datetime.datetime.now().replace(
                            hour=sickrage.SHOWUPDATE_HOUR)
                    }),
                name="SHOWUPDATER",
                id="SHOWUPDATER",
                replace_existing=True)

            # add daily search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.DAILYSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.DAILY_SEARCHER_FREQ,
                        'min': sickrage.MIN_DAILY_SEARCHER_FREQ
                    }),
                name="DAILYSEARCHER",
                id="DAILYSEARCHER",
                replace_existing=True)

            # add backlog search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.BACKLOGSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.BACKLOG_SEARCHER_FREQ,
                        'min': sickrage.MIN_BACKLOG_SEARCHER_FREQ
                    }),
                name="BACKLOG",
                id="BACKLOG",
                replace_existing=True)

            # add auto-postprocessing job to scheduler
            job = sickrage.Scheduler.add_job(
                auto_postprocessor.PostProcessor().run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.AUTOPOSTPROCESSOR_FREQ,
                        'min': sickrage.MIN_AUTOPOSTPROCESSOR_FREQ
                    }),
                name="POSTPROCESSOR",
                id="POSTPROCESSOR",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.PROCESS_AUTOMATICALLY]()

            # add find propers job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.PROPERSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': {
                            '15m': 15,
                            '45m': 45,
                            '90m': 90,
                            '4h': 4 * 60,
                            'daily': 24 * 60
                        }[sickrage.PROPER_SEARCHER_INTERVAL]
                    }),
                name="PROPERSEARCHER",
                id="PROPERSEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.DOWNLOAD_PROPERS]()

            # add trakt.tv checker job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.TRAKTSEARCHER.run,
                SRIntervalTrigger(**{'hours': 1}),
                name="TRAKTSEARCHER",
                id="TRAKTSEARCHER",
                replace_existing=True,
            )
            (job.pause, job.resume)[sickrage.USE_TRAKT]()

            # add subtitles finder job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.SUBTITLESEARCHER.run,
                SRIntervalTrigger(
                    **{'hours': sickrage.SUBTITLE_SEARCHER_FREQ}),
                name="SUBTITLESEARCHER",
                id="SUBTITLESEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.USE_SUBTITLES]()

            # initialize web server
            sickrage.WEB_SERVER = SRWebServer(**{
                'port': int(sickrage.WEB_PORT),
                'host': sickrage.WEB_HOST,
                'data_root': sickrage.DATA_DIR,
                'gui_root': sickrage.GUI_DIR,
                'web_root': sickrage.WEB_ROOT,
                'log_dir': sickrage.WEB_LOG or sickrage.LOG_DIR,
                'username': sickrage.WEB_USERNAME,
                'password': sickrage.WEB_PASSWORD,
                'enable_https': sickrage.ENABLE_HTTPS,
                'handle_reverse_proxy': sickrage.HANDLE_REVERSE_PROXY,
                'https_cert': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_CERT),
                'https_key': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_KEY),
                'daemonize': sickrage.DAEMONIZE,
                'pidfile': sickrage.PIDFILE,
                'stop_timeout': 3,
                'nolaunch': sickrage.WEB_NOLAUNCH
            })

            sickrage.LOGGER.info("SiCKRAGE VERSION:[{}] CONFIG:[{}]".format(
                sickrage.VERSION, sickrage.CONFIG_FILE))
            sickrage.INITIALIZED = True
            return True
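
Both initialize() above and start() below toggle scheduler jobs with the tuple-indexing idiom (job.pause, job.resume)[flag](): the boolean flag indexes the two-tuple (False selects pause, True selects resume) and the chosen bound method is called immediately. A standalone illustration with a dummy job:

    class DummyJob(object):
        def pause(self):
            print("job paused")

        def resume(self):
            print("job resumed")

    job = DummyJob()
    enabled = True
    (job.pause, job.resume)[enabled]()  # True -> index 1 -> prints "job resumed"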
Example 12
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE
        self.srLogger.logFile = self.srConfig.LOG_FILE

        # start logger
        self.srLogger.start()

        # initialize the main SB database
        main_db.MainDB().InitialSchema().upgrade()

        # initialize the cache database
        cache_db.CacheDB().InitialSchema().upgrade()

        # initialize the failed downloads database
        failed_db.FailedDB().InitialSchema().upgrade()

        # fix up any db problems
        main_db.MainDB().SanityCheck()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for cache_subdir in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, cache_subdir),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(
                    keepAlive=True,
                    log=lambda msg: self.srLogger.debug(
                        "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME,
                                                  self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # initialize metadata_providers
        for cur_metadata_tuple in [
            (self.srConfig.METADATA_KODI, kodi),
            (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
            (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
            (self.srConfig.METADATA_PS3, ps3),
            (self.srConfig.METADATA_WDTV, wdtv),
            (self.srConfig.METADATA_TIVO, tivo),
            (self.srConfig.METADATA_MEDE8ER, mede8er)
        ]:
            (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
            tmp_provider = cur_metadata_class.metadata_class()
            tmp_provider.set_config(cur_metadata_config)

            self.metadataProviderDict[tmp_provider.name] = tmp_provider

        # add show queue job
        self.srScheduler.add_job(self.SHOWQUEUE.run,
                                 srIntervalTrigger(**{'seconds': 1}),
                                 name="SHOWQUEUE",
                                 id="SHOWQUEUE")

        # add search queue job
        self.srScheduler.add_job(self.SEARCHQUEUE.run,
                                 srIntervalTrigger(**{'seconds': 1}),
                                 name="SEARCHQUEUE",
                                 id="SEARCHQUEUE")

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add namecache updater job
        self.srScheduler.add_job(
            self.NAMECACHE.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.NAMECACHE_FREQ,
                    'min': self.srConfig.MIN_NAMECACHE_FREQ
                }),
            name="NAMECACHE",
            id="NAMECACHE")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': 1,
                    'start_date': datetime.datetime.now().replace(
                        hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ
                }),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start webserver
        self.srWebServer.start()

        # start ioloop event handler
        IOLoop.instance().start()