Code Example #1
File: tz_updater.py Project: becian/SickRage-1
def update_network_dict():
    """Update timezone information from SR repositories"""

    url = 'http://sickragetv.github.io/sb_network_timezones/network_timezones.txt'
    url_data = getURL(url, session=requests.Session())
    if not url_data:
        sickrage.LOGGER.warning(
            'Updating network timezones failed, this can happen from time to time. URL: %s'
            % url)
        load_network_dict()
        return

    d = {}
    try:
        for line in url_data.splitlines():
            (key, val) = line.strip().rsplit(':', 1)
            if key is None or val is None:
                continue
            d[key] = val
    except (IOError, OSError, ValueError):
        # ValueError: a malformed line without a ':' separator
        pass

    network_list = dict(
        cache_db.CacheDB().select('SELECT * FROM network_timezones;'))

    queries = []
    for network, timezone in d.iteritems():
        existing = network in network_list
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_list[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])

        if existing:
            del network_list[network]

    if network_list:
        purged = list(network_list)
        queries.append([
            'DELETE FROM network_timezones WHERE network_name IN (%s);' %
            ','.join(['?'] * len(purged)), purged
        ])

    if queries:
        cache_db.CacheDB().mass_action(queries)
        load_network_dict()
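
The loop above does a diff-and-purge sync: insert networks that are new, update ones whose timezone changed, then delete whatever is left over from the old snapshot. A minimal self-contained sketch of the same pattern using plain sqlite3 (assuming the CacheDB wrapper behaves like a thin layer over it; all data here is made up):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE network_timezones (network_name TEXT PRIMARY KEY, timezone TEXT)')
conn.executemany('INSERT INTO network_timezones VALUES (?,?)',
                 [('ABC', 'US/Eastern'), ('Dead Network', 'UTC')])

fetched = {'ABC': 'US/Pacific', 'BBC One': 'Europe/London'}  # freshly downloaded data

existing = dict(conn.execute('SELECT * FROM network_timezones'))
for network, timezone in fetched.items():
    if network not in existing:
        conn.execute('INSERT OR IGNORE INTO network_timezones VALUES (?,?)', (network, timezone))
    elif existing[network] != timezone:  # value comparison, not identity
        conn.execute('UPDATE network_timezones SET timezone = ? WHERE network_name = ?',
                     (timezone, network))
    existing.pop(network, None)

if existing:  # whatever remains was dropped upstream -> purge it
    conn.execute('DELETE FROM network_timezones WHERE network_name IN (%s)'
                 % ','.join('?' * len(existing)), list(existing))

print(dict(conn.execute('SELECT * FROM network_timezones')))
# -> {'ABC': 'US/Pacific', 'BBC One': 'Europe/London'}
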
Code Example #2
File: scene_exceptions.py Project: becian/SickRage-1
def get_scene_exceptions(indexer_id, season=-1):
    """
    Given an indexer_id, return a list of all the scene exceptions.
    """

    exceptionsList = []

    if indexer_id not in exceptionsCache or season not in exceptionsCache[
            indexer_id]:
        exceptions = cache_db.CacheDB().select(
            "SELECT show_name FROM scene_exceptions WHERE indexer_id = ? AND season = ?",
            [indexer_id, season])
        if exceptions:
            exceptionsList = list(
                set([
                    cur_exception[b"show_name"] for cur_exception in exceptions
                ]))

            if indexer_id not in exceptionsCache:
                exceptionsCache[indexer_id] = {}
            exceptionsCache[indexer_id][season] = exceptionsList
    else:
        exceptionsList = exceptionsCache[indexer_id][season]

    if season == 1:  # if we were looking for season 1 we can add generic names
        exceptionsList += get_scene_exceptions(indexer_id, season=-1)

    return exceptionsList
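
The function memoizes DB results in a two-level dict keyed by show, then season, and season-1 lookups also pull in the season-agnostic (-1) names. A stripped-down sketch of the caching shape alone, with the DB query replaced by a stand-in (all names here are illustrative):

exceptionsCache = {}

def fake_db_lookup(indexer_id, season):
    # stand-in for the scene_exceptions SELECT above
    return ['Alias for season %d' % season]

def get_exceptions(indexer_id, season=-1):
    if season not in exceptionsCache.setdefault(indexer_id, {}):
        exceptionsCache[indexer_id][season] = fake_db_lookup(indexer_id, season)
    return list(exceptionsCache[indexer_id][season])

print(get_exceptions(123, 1))  # first call hits the "DB"
print(get_exceptions(123, 1))  # second call is served from the cache
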
Code Example #3
File: tz_updater.py Project: becian/SickRage-1
def load_network_dict():
    """
    Load network timezones from the db and return them as a dict
    """
    try:
        cur_network_list = cache_db.CacheDB().select(
            'SELECT * FROM network_timezones;')
        if not cur_network_list:
            update_network_dict()
            cur_network_list = cache_db.CacheDB().select(
                'SELECT * FROM network_timezones;')
        d = dict(cur_network_list)
    except Exception:
        d = {}

    return d
Code Example #4
File: name_cache.py Project: becian/SickRage-1
    def saveNameCacheToDb(self):
        """Commit cache to database file"""

        for name, indexer_id in self.cache.items():
            cache_db.CacheDB().action(
                "INSERT OR REPLACE INTO scene_names (indexer_id, name) VALUES (?, ?)",
                [indexer_id, name])
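
INSERT OR REPLACE only replaces a row when the insert would violate a uniqueness constraint, so the scene_names table presumably declares one (otherwise the statement degenerates into a plain INSERT). A quick sqlite3 demonstration of that behaviour:

import sqlite3

conn = sqlite3.connect(':memory:')
# the REPLACE semantics depend on this UNIQUE constraint
conn.execute('CREATE TABLE scene_names (indexer_id INTEGER, name TEXT UNIQUE)')
conn.execute('INSERT OR REPLACE INTO scene_names (indexer_id, name) VALUES (?, ?)', (0, 'Some Show'))
conn.execute('INSERT OR REPLACE INTO scene_names (indexer_id, name) VALUES (?, ?)', (42, 'Some Show'))
print(conn.execute('SELECT * FROM scene_names').fetchall())  # [(42, 'Some Show')]
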
Code Example #5
def setUp_test_db(force=False):
    """upgrades the db to the latest version
    """

    global TESTDB_INITALIZED

    if not TESTDB_INITALIZED or force:
        # remove old db files
        tearDown_test_db()

        # upgrade main
        main_db.MainDB().InitialSchema().upgrade()

        # sanity check main
        main_db.MainDB().SanityCheck()

        # upgrade cache
        cache_db.CacheDB().InitialSchema().upgrade()

        # upgrade failed
        failed_db.FailedDB().InitialSchema().upgrade()

        # populate scene exceptions table
        # retrieve_exceptions(False, False)

        TESTDB_INITALIZED = True
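
A typical caller would be a test case's setUp; a hypothetical sketch (the class name is illustrative):

import unittest

class CacheDBTest(unittest.TestCase):
    def setUp(self):
        # rebuild the test databases before each test; force=True discards
        # any state left over from a previous run
        setUp_test_db(force=True)

    def tearDown(self):
        tearDown_test_db()
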
Code Example #6
File: name_cache.py Project: Jusedawg/SiCKRAGETV
    def clearCache(self, indexerid=0):
        """
        Deletes entries matching the given indexerid, as well as all "unknown" entries (indexer_id of 0), from the database and the in-memory cache.
        """
        cache_db.CacheDB().action("DELETE FROM scene_names WHERE indexer_id = ? OR indexer_id = ?", (indexerid, 0))

        toRemove = [key for key, value in self.cache.items() if value == 0 or value == indexerid]
        for key in toRemove:
            del self.cache[key]
Code Example #7
File: scene_exceptions.py Project: becian/SickRage-1
def setLastRefresh(exList):
    """
    Update last cache update time for shows in list

    :param exList: exception list to set refresh time
    """
    cache_db.CacheDB().upsert("scene_exceptions_refresh", {
        'last_refreshed':
        int(time.mktime(datetime.datetime.today().timetuple()))
    }, {'list': exList})
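
The last_refreshed value is just the current Unix time in whole seconds; int(time.mktime(datetime.datetime.today().timetuple())) is a roundabout spelling of int(time.time()), as this standalone check shows:

import time
import datetime

a = int(time.mktime(datetime.datetime.today().timetuple()))
b = int(time.time())
assert abs(a - b) <= 1  # same wall-clock second, modulo sub-second truncation
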
Code Example #8
    def test_sceneExceptionsResetNameCache(self):
        # clear the exceptions
        cache_db.CacheDB().action("DELETE FROM scene_exceptions")

        # put something in the cache
        sickrage.srCore.NAMECACHE.addNameToCache('Cached Name', 0)

        # updating should not clear the cache this time since our exceptions didn't change
        self.assertEqual(
            sickrage.srCore.NAMECACHE.retrieveNameFromCache('Cached Name'), 0)
Code Example #9
File: scene_exceptions.py Project: becian/SickRage-1
def update_scene_exceptions(indexer_id, scene_exceptions, season=-1):
    """
    Given an indexer_id and a list of all show scene exceptions, update the db.
    """
    cache_db.CacheDB().action(
        'DELETE FROM scene_exceptions WHERE indexer_id=? AND season=?',
        [indexer_id, season])

    sickrage.LOGGER.info("Updating scene exceptions")

    # A change has been made to the scene exception list. Let's clear the cache, to make this visible
    if indexer_id in exceptionsCache:
        exceptionsCache[indexer_id] = {}
        exceptionsCache[indexer_id][season] = scene_exceptions

    for cur_exception in scene_exceptions:
        cache_db.CacheDB().action(
            "INSERT INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?)",
            [indexer_id, cur_exception, season])
Code Example #10
    def run(self, force=False):
        if self.amActive:
            return

        self.amActive = True

        # set thread name
        threading.currentThread().setName(self.name)

        update_timestamp = time.mktime(datetime.datetime.now().timetuple())

        sqlResult = cache_db.CacheDB().select('SELECT `time` FROM lastUpdate WHERE provider = ?', ['theTVDB'])
        if sqlResult:
            last_update = sqlResult[0]['time']
        else:
            last_update = time.mktime(datetime.datetime.min.timetuple())
            cache_db.CacheDB().action('INSERT INTO lastUpdate (provider, `time`) VALUES (?, ?)',
                                      ['theTVDB', long(last_update)])

        if sickrage.srCore.srConfig.USE_FAILED_DOWNLOADS:
            FailedHistory.trimHistory()

        # get indexer updated show ids
        updated_shows = srIndexerApi(1).indexer(**srIndexerApi(1).api_params.copy()).updated(long(last_update))

        # start update process
        piList = []
        # compute the updated-show id set once instead of once per show
        updated_show_ids = set(d["id"] for d in updated_shows or {})
        for curShow in sickrage.srCore.SHOWLIST:
            try:
                curShow.nextEpisode()
                if curShow.indexerid in updated_show_ids:
                    piList.append(sickrage.srCore.SHOWQUEUE.updateShow(curShow, True))
                else:
                    piList.append(sickrage.srCore.SHOWQUEUE.refreshShow(curShow, False))
            except (CantUpdateShowException, CantRefreshShowException) as e:
                continue

        ProgressIndicators.setIndicator('dailyShowUpdates', QueueProgressIndicator("Daily Show Updates", piList))

        cache_db.CacheDB().action('UPDATE lastUpdate SET `time` = ? WHERE provider=?',
                                  [long(update_timestamp), 'theTVDB'])

        self.amActive = False
Code Example #11
def update_network_dict():
    """Update timezone information from SR repositories"""

    url = 'http://sickragetv.github.io/network_timezones/network_timezones.txt'

    try:
        url_data = sickrage.srCore.srWebSession.get(url).text
    except Exception:
        sickrage.srCore.srLogger.warning(
            'Updating network timezones failed, this can happen from time to time. URL: %s'
            % url)
        return

    d = {}
    try:
        for line in url_data.splitlines():
            (key, val) = line.strip().rsplit(':', 1)
            if key is None or val is None:
                continue
            d[key] = val
    except (IOError, OSError, ValueError):
        # ValueError: a malformed line without a ':' separator
        pass

    network_timezones = load_network_dict()

    queries = []
    for network, timezone in d.items():
        existing = network in network_timezones
        if not existing:
            queries.append([
                'INSERT OR IGNORE INTO network_timezones VALUES (?,?);',
                [network, timezone]
            ])
        elif network_timezones[network] != timezone:
            queries.append([
                'UPDATE OR IGNORE network_timezones SET timezone = ? WHERE network_name = ?;',
                [timezone, network]
            ])

        if existing:
            del network_timezones[network]

    if network_timezones:
        purged = list(network_timezones)
        queries.append([
            'DELETE FROM network_timezones WHERE network_name IN (%s);' %
            ','.join(['?'] * len(purged)), purged
        ])

    if queries:
        cache_db.CacheDB().mass_action(queries)
        del queries  # cleanup
Code Example #12
File: name_cache.py Project: Jusedawg/SiCKRAGETV
    def addNameToCache(self, name, indexer_id=0):
        """
        Adds the show & tvdb id to the scene_names table in cache.db.

        :param name: The show name to cache
        :param indexer_id: the TVDB id that this show should be cached with (can be None/0 for unknown)
        """
        # standardize the name we're using to account for small differences in providers
        name = full_sanitizeSceneName(name)
        if name not in self.cache:
            self.cache[name] = int(indexer_id)
            cache_db.CacheDB().action("INSERT OR REPLACE INTO scene_names (indexer_id, name) VALUES (?, ?)",
                                      [indexer_id, name])
Code Example #13
def load_network_dict():
    """
    Return network timezones from db
    """
    try:
        cur_network_list = cache_db.CacheDB().select(
            'SELECT * FROM network_timezones;')
        if cur_network_list:
            return dict(cur_network_list)
    except Exception:
        pass

    return {}
Code Example #14
def get_scene_exception_by_name_multiple(show_name):
    """
    Given a show name, return a list of (indexer_id, season) tuples for matching
    exceptions, or [(None, None)] if no exception is present.
    """

    # try the obvious case first
    exception_result = cache_db.CacheDB().select(
        "SELECT indexer_id, season FROM scene_exceptions WHERE LOWER(show_name) = ? ORDER BY season ASC",
        [show_name.lower()])
    if exception_result:
        return [(int(x["indexer_id"]), int(x["season"]))
                for x in exception_result]

    out = []
    all_exception_results = cache_db.CacheDB().select(
        "SELECT show_name, indexer_id, season FROM scene_exceptions")

    for cur_exception in all_exception_results:

        cur_exception_name = cur_exception["show_name"]
        cur_indexer_id = int(cur_exception["indexer_id"])
        cur_season = int(cur_exception["season"])

        if show_name.lower() in (
                cur_exception_name.lower(),
                sanitizeSceneName(cur_exception_name).lower().replace(
                    '.', ' ')):
            sickrage.srCore.srLogger.debug(
                "Scene exception lookup got indexer id " +
                str(cur_indexer_id) + ", using that")
            out.append((cur_indexer_id, cur_season))

    if out:
        return out

    return [(None, None)]
Code Example #15
File: scene_exceptions.py Project: becian/SickRage-1
def shouldRefresh(exList):
    """
    Check if we should refresh cache for items in exList

    :param exList: exception list to check if needs a refresh
    :return: True if refresh is needed
    """
    MAX_REFRESH_AGE_SECS = 86400  # 1 day

    rows = cache_db.CacheDB().select(
        "SELECT last_refreshed FROM scene_exceptions_refresh WHERE list = ?",
        [exList])
    if rows:
        lastRefresh = int(rows[0][b'last_refreshed'])
        return int(time.mktime(datetime.datetime.today().timetuple())
                   ) > lastRefresh + MAX_REFRESH_AGE_SECS
    else:
        return True
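
Stripped of the DB access, the refresh decision is a plain age check; a self-contained stand-in for the logic above:

import time

MAX_REFRESH_AGE_SECS = 86400  # 1 day

def should_refresh(last_refreshed):
    # last_refreshed: Unix timestamp of the previous refresh,
    # or None if the list has never been refreshed
    if last_refreshed is None:
        return True
    return int(time.time()) > last_refreshed + MAX_REFRESH_AGE_SECS

print(should_refresh(None))                          # True: never refreshed
print(should_refresh(int(time.time()) - 3600))       # False: an hour old
print(should_refresh(int(time.time()) - 2 * 86400))  # True: two days old
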
Code Example #16
File: scene_exceptions.py Project: becian/SickRage-1
def get_all_scene_exceptions(indexer_id):
    """
    Get all scene exceptions for a show ID

    :param indexer_id: ID to check
    :return: dict of exceptions
    """
    exceptionsDict = {}

    exceptions = cache_db.CacheDB().select(
        "SELECT show_name,season FROM scene_exceptions WHERE indexer_id = ?",
        [indexer_id])

    if exceptions:
        for cur_exception in exceptions:
            if cur_exception[b"season"] not in exceptionsDict:
                exceptionsDict[cur_exception[b"season"]] = []
            exceptionsDict[cur_exception[b"season"]].append(
                cur_exception[b"show_name"])

    return exceptionsDict
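
The loop builds a dict-of-lists keyed by season; a standalone equivalent of the grouping step using collections.defaultdict (the rows are made up):

from collections import defaultdict

rows = [{'season': -1, 'show_name': 'Generic Alias'},
        {'season': 1, 'show_name': 'Season One Alias'},
        {'season': 1, 'show_name': 'Another Alias'}]

exceptionsDict = defaultdict(list)
for row in rows:
    exceptionsDict[row['season']].append(row['show_name'])

print(dict(exceptionsDict))
# -> {-1: ['Generic Alias'], 1: ['Season One Alias', 'Another Alias']}
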
Code Example #17
    def test_allPossibleShowNames(self):
        exceptionsCache[-1] = ['Exception Test']
        cache_db.CacheDB().action(
            "INSERT INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?)",
            [-1, 'Exception Test', -1])
        countryList['Full Country Name'] = 'FCN'

        self._test_allPossibleShowNames('Show Name', expected=['Show Name'])
        self._test_allPossibleShowNames(
            'Show Name', -1, expected=['Show Name', 'Exception Test'])
        self._test_allPossibleShowNames(
            'Show Name FCN',
            expected=['Show Name FCN', 'Show Name (Full Country Name)'])
        self._test_allPossibleShowNames(
            'Show Name (FCN)',
            expected=['Show Name (FCN)', 'Show Name (Full Country Name)'])
        self._test_allPossibleShowNames(
            'Show Name Full Country Name',
            expected=['Show Name Full Country Name', 'Show Name (FCN)'])
        self._test_allPossibleShowNames(
            'Show Name (Full Country Name)',
            expected=['Show Name (Full Country Name)', 'Show Name (FCN)'])
Code Example #18
File: scene_exceptions.py Project: becian/SickRage-1
def get_scene_seasons(indexer_id):
    """
    return a list of season numbers that have scene exceptions
    """
    exceptionsSeasonList = []

    if indexer_id not in exceptionsSeasonCache:
        sqlResults = cache_db.CacheDB().select(
            "SELECT DISTINCT(season) AS season FROM scene_exceptions WHERE indexer_id = ?",
            [indexer_id])
        if sqlResults:
            exceptionsSeasonList = list(
                set([int(x[b"season"]) for x in sqlResults]))

            if indexer_id not in exceptionsSeasonCache:
                exceptionsSeasonCache[indexer_id] = {}

            exceptionsSeasonCache[indexer_id] = exceptionsSeasonList
    else:
        exceptionsSeasonList = exceptionsSeasonCache[indexer_id]

    return exceptionsSeasonList
Code Example #19
File: __init__.py Project: becian/SickRage-1
def setUp_test_db():
    """upgrades the db to the latest version
    """

    global TESTDB_INITALIZED

    if not TESTDB_INITALIZED:
        # remove old db files
        tearDown_test_db()

        # upgrading the db
        main_db.MainDB().InitialSchema().upgrade()

        # fix up any db problems
        main_db.MainDB().SanityCheck()

        # and for cachedb too
        cache_db.CacheDB().InitialSchema().upgrade()

        # and for faileddb too
        failed_db.FailedDB().InitialSchema().upgrade()

        TESTDB_INITALIZED = True
Code Example #20
File: name_cache.py Project: Jusedawg/SiCKRAGETV
    def loadNameCacheFromDB(self):
        sqlResults = cache_db.CacheDB(row_type='dict').select(
            "SELECT indexer_id, name FROM scene_names")

        return dict((row["name"], int(row["indexer_id"])) for row in sqlResults)
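
The row_type='dict' argument presumably tells the wrapper to return mapping-style rows; with the standard library the equivalent is a row_factory:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = sqlite3.Row  # rows become indexable by column name
conn.execute('CREATE TABLE scene_names (indexer_id INTEGER, name TEXT)')
conn.execute('INSERT INTO scene_names VALUES (?, ?)', (42, 'Example Show'))

rows = conn.execute('SELECT indexer_id, name FROM scene_names').fetchall()
name_cache = dict((row['name'], int(row['indexer_id'])) for row in rows)
print(name_cache)  # {'Example Show': 42}
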
Code Example #21
def retrieve_exceptions(get_xem=True, get_anidb=True):
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """

    for indexer in srIndexerApi().indexers:
        indexer_name = srIndexerApi(indexer).name

        if shouldRefresh(indexer_name):
            sickrage.srCore.srLogger.info(
                "Checking for SiCKRAGE scene exception updates on {}".format(
                    indexer_name))
            loc = srIndexerApi(indexer).config['scene_loc']

            try:
                # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
                cur_line = None
                for cur_line in sickrage.srCore.srWebSession.get(
                        loc).text.splitlines():
                    indexer_id, _, aliases = cur_line.partition(
                        ':')  # @UnusedVariable
                    if not aliases:
                        continue

                    # regex out the list of shows, taking \' into account
                    exception_dict[int(indexer_id)] = [{
                        re.sub(r'\\(.)', r'\1', x):
                        -1
                    } for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                if cur_line is None:
                    sickrage.srCore.srLogger.debug(
                        "Check scene exceptions update failed. Unable to update from: {}"
                        .format(loc))
                    continue

                # refreshed successfully
                setLastRefresh(indexer_name)
            except Exception:
                continue

    # XEM scene exceptions
    if get_xem:
        _xem_exceptions_fetcher()

    # AniDB scene exceptions
    if get_anidb:
        _anidb_exceptions_fetcher()

    sql_l = []
    for cur_indexer_id in exception_dict:
        sql_ex = cache_db.CacheDB().select(
            "SELECT * FROM scene_exceptions WHERE indexer_id = ?;",
            [cur_indexer_id])
        existing_exceptions = [x["show_name"] for x in sql_ex]
        if cur_indexer_id not in exception_dict:
            continue

        for cur_exception_dict in exception_dict[cur_indexer_id]:
            for ex in cur_exception_dict.items():
                cur_exception, curSeason = ex
                if cur_exception not in existing_exceptions:
                    sql_l.append([
                        "INSERT OR IGNORE INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?);",
                        [cur_indexer_id, cur_exception, curSeason]
                    ])
    if sql_l:
        cache_db.CacheDB().mass_action(sql_l)
        sickrage.srCore.srLogger.debug("Updated scene exceptions")
        del sql_l  # cleanup
    else:
        sickrage.srCore.srLogger.debug("No scene exceptions update needed")

    # cleanup
    exception_dict.clear()
    anidb_exception_dict.clear()
    xem_exception_dict.clear()
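
The regex in the parsing loop extracts single-quoted names from a line such as 12345: 'Show One', 'Don\'t Stop', honouring escaped quotes; a quick standalone check:

import re

cur_line = r"12345: 'Show One', 'Don\'t Stop'"
indexer_id, _, aliases = cur_line.partition(':')
# a closing quote must not be preceded by a backslash; \' is unescaped afterwards
names = [re.sub(r'\\(.)', r'\1', x)
         for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
print(int(indexer_id), names)  # 12345 ['Show One', "Don't Stop"]
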
Code Example #22
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE
        self.srLogger.logFile = self.srConfig.LOG_FILE

        # start logger
        self.srLogger.start()

        # initialize the main SB database
        main_db.MainDB().InitialSchema().upgrade()

        # initialize the cache database
        cache_db.CacheDB().InitialSchema().upgrade()

        # initialize the failed downloads database
        failed_db.FailedDB().InitialSchema().upgrade()

        # fix up any db problems
        main_db.MainDB().SanityCheck()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for dir in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, dir),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(
                    keepAlive=True,
                    log=lambda msg: self.srLogger.debug(
                        "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME,
                                                  self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # initialize metadata_providers
        for cur_metadata_tuple in [
            (self.srConfig.METADATA_KODI, kodi),
            (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
            (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
            (self.srConfig.METADATA_PS3, ps3),
            (self.srConfig.METADATA_WDTV, wdtv),
            (self.srConfig.METADATA_TIVO, tivo),
            (self.srConfig.METADATA_MEDE8ER, mede8er)
        ]:
            (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
            tmp_provider = cur_metadata_class.metadata_class()
            tmp_provider.set_config(cur_metadata_config)

            self.metadataProviderDict[tmp_provider.name] = tmp_provider

        # add show queue job
        self.srScheduler.add_job(self.SHOWQUEUE.run,
                                 srIntervalTrigger(**{'seconds': 1}),
                                 name="SHOWQUEUE",
                                 id="SHOWQUEUE")

        # add search queue job
        self.srScheduler.add_job(self.SEARCHQUEUE.run,
                                 srIntervalTrigger(**{'seconds': 1}),
                                 name="SEARCHQUEUE",
                                 id="SEARCHQUEUE")

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add namecache updater job
        self.srScheduler.add_job(
            self.NAMECACHE.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.NAMECACHE_FREQ,
                    'min': self.srConfig.MIN_NAMECACHE_FREQ
                }),
            name="NAMECACHE",
            id="NAMECACHE")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours':
                    1,
                    'start_date':
                    datetime.datetime.now().replace(
                        hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ
                }),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start webserver
        self.srWebServer.start()

        # start ioloop event handler
        IOLoop.instance().start()
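
The (job.pause, job.resume)[flag]() construction in the Pause/Resume blocks indexes a tuple of bound methods with a boolean (False selects index 0, True index 1) and calls whichever was selected; a tiny standalone illustration:

class Job(object):
    def pause(self):
        print('paused')

    def resume(self):
        print('resumed')

job = Job()
for flag in (False, True):
    (job.pause, job.resume)[flag]()  # prints 'paused', then 'resumed'
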
Code Example #23
def initialize():
    if not sickrage.INITIALIZED:
        with threading.Lock():
            # init encoding
            encodingInit()

            # Check if we need to perform a restore first
            os.chdir(sickrage.DATA_DIR)
            restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
            if os.path.exists(restore_dir):
                success = restoreDB(restore_dir, sickrage.DATA_DIR)
                sickrage.LOGGER.info(
                    "Restore: restoring DB and config.ini %s!\n" %
                    ("FAILED", "SUCCESSFUL")[success])

            # init indexerApi
            sickrage.INDEXER_API = indexerApi

            # initialize notifiers
            sickrage.NOTIFIERS = AttrDict(
                libnotify=LibnotifyNotifier(),
                kodi_notifier=KODINotifier(),
                plex_notifier=PLEXNotifier(),
                emby_notifier=EMBYNotifier(),
                nmj_notifier=NMJNotifier(),
                nmjv2_notifier=NMJv2Notifier(),
                synoindex_notifier=synoIndexNotifier(),
                synology_notifier=synologyNotifier(),
                pytivo_notifier=pyTivoNotifier(),
                growl_notifier=GrowlNotifier(),
                prowl_notifier=ProwlNotifier(),
                libnotify_notifier=LibnotifyNotifier(),
                pushover_notifier=PushoverNotifier(),
                boxcar_notifier=BoxcarNotifier(),
                boxcar2_notifier=Boxcar2Notifier(),
                nma_notifier=NMA_Notifier(),
                pushalot_notifier=PushalotNotifier(),
                pushbullet_notifier=PushbulletNotifier(),
                freemobile_notifier=FreeMobileNotifier(),
                twitter_notifier=TwitterNotifier(),
                trakt_notifier=TraktNotifier(),
                email_notifier=EmailNotifier(),
            )

            sickrage.NAMING_EP_TYPE = (
                "%(seasonnumber)dx%(episodenumber)02d",
                "s%(seasonnumber)02de%(episodenumber)02d",
                "S%(seasonnumber)02dE%(episodenumber)02d",
                "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.SPORTS_EP_TYPE = (
                "%(seasonnumber)dx%(episodenumber)02d",
                "s%(seasonnumber)02de%(episodenumber)02d",
                "S%(seasonnumber)02dE%(episodenumber)02d",
                "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.NAMING_EP_TYPE_TEXT = ("1x02", "s01e02", "S01E02",
                                            "01x02")
            sickrage.NAMING_MULTI_EP_TYPE = {
                0: ["-%(episodenumber)02d"] * len(sickrage.NAMING_EP_TYPE),
                1: [" - " + x for x in sickrage.NAMING_EP_TYPE],
                2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
            }

            sickrage.NAMING_MULTI_EP_TYPE_TEXT = ("extend", "duplicate",
                                                  "repeat")
            sickrage.NAMING_SEP_TYPE = (" - ", " ")
            sickrage.NAMING_SEP_TYPE_TEXT = (" - ", "space")

            # migrate old database filenames to new ones
            if not os.path.exists(main_db.MainDB().filename
                                  ) and os.path.exists("sickbeard.db"):
                helpers.moveFile("sickbeard.db", main_db.MainDB().filename)

            # init config file
            srConfig.load_config(sickrage.CONFIG_FILE, True)

            # set socket timeout
            socket.setdefaulttimeout(sickrage.SOCKET_TIMEOUT)

            # init logger
            sickrage.LOGGER = sickrage.LOGGER.__class__(
                logFile=sickrage.LOG_FILE,
                logSize=sickrage.LOG_SIZE,
                logNr=sickrage.LOG_NR,
                fileLogging=makeDir(sickrage.LOG_DIR),
                debugLogging=sickrage.DEBUG)

            # init updater and get current version
            sickrage.VERSIONUPDATER = VersionUpdater()
            sickrage.VERSION = sickrage.VERSIONUPDATER.updater.get_cur_version

            # initialize the main SB database
            main_db.MainDB().InitialSchema().upgrade()

            # initialize the cache database
            cache_db.CacheDB().InitialSchema().upgrade()

            # initialize the failed downloads database
            failed_db.FailedDB().InitialSchema().upgrade()

            # fix up any db problems
            main_db.MainDB().SanityCheck()

            if sickrage.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                             'news', 'IRC'):
                sickrage.DEFAULT_PAGE = 'home'

            if not makeDir(sickrage.CACHE_DIR):
                sickrage.LOGGER.error("!!! Creating local cache dir failed")
                sickrage.CACHE_DIR = None

            # Check if we need to perform a restore of the cache folder
            try:
                restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
                if os.path.exists(restore_dir) and os.path.exists(
                        os.path.join(restore_dir, 'cache')):

                    def restore_cache(srcdir, dstdir):
                        def path_leaf(path):
                            head, tail = os.path.split(path)
                            return tail or os.path.basename(head)

                        try:
                            if os.path.isdir(dstdir):
                                bakfilename = '{0}-{1}'.format(
                                    path_leaf(dstdir),
                                    datetime.datetime.now().strftime(
                                        '%Y%m%d_%H%M%S'))
                                shutil.move(
                                    dstdir,
                                    os.path.join(os.path.dirname(dstdir),
                                                 bakfilename))

                            shutil.move(srcdir, dstdir)
                            sickrage.LOGGER.info(
                                "Restore: restoring cache successful")
                        except Exception as E:
                            sickrage.LOGGER.error(
                                "Restore: restoring cache failed: {}".format(
                                    E))

                    restore_cache(os.path.join(restore_dir, 'cache'),
                                  sickrage.CACHE_DIR)
            except Exception as e:
                sickrage.LOGGER.error(
                    "Restore: restoring cache failed: {}".format(e))
            finally:
                if os.path.exists(os.path.join(sickrage.DATA_DIR, 'restore')):
                    try:
                        removetree(os.path.join(sickrage.DATA_DIR, 'restore'))
                    except Exception as e:
                        sickrage.LOGGER.error(
                            "Restore: Unable to remove the restore directory: {}"
                            .format(e))

                    for cleanupDir in ['mako', 'sessions', 'indexers']:
                        try:
                            removetree(
                                os.path.join(sickrage.CACHE_DIR, cleanupDir))
                        except Exception as e:
                            sickrage.LOGGER.warning(
                                "Restore: Unable to remove the cache/{0} directory: {1}"
                                .format(cleanupDir, e))

            if sickrage.WEB_PORT < 21 or sickrage.WEB_PORT > 65535:
                sickrage.WEB_PORT = 8081

            if not sickrage.WEB_COOKIE_SECRET:
                sickrage.WEB_COOKIE_SECRET = generateCookieSecret()

            # attempt to help prevent users from breaking links by using a bad url
            if not sickrage.ANON_REDIRECT.endswith('?'):
                sickrage.ANON_REDIRECT = ''

            if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', sickrage.ROOT_DIRS):
                sickrage.ROOT_DIRS = ''

            sickrage.NAMING_FORCE_FOLDERS = check_force_season_folders()
            if sickrage.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
                sickrage.NZB_METHOD = 'blackhole'

            if not sickrage.PROVIDER_ORDER:
                sickrage.PROVIDER_ORDER = sickrage.providersDict[GenericProvider.NZB].keys() + \
                                          sickrage.providersDict[GenericProvider.TORRENT].keys()

            if sickrage.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                               'transmission', 'deluge',
                                               'deluged', 'download_station',
                                               'rtorrent', 'qbittorrent',
                                               'mlnet'):
                sickrage.TORRENT_METHOD = 'blackhole'

            if sickrage.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                         '4h', 'daily'):
                sickrage.PROPER_SEARCHER_INTERVAL = 'daily'

            if sickrage.AUTOPOSTPROCESSOR_FREQ < sickrage.MIN_AUTOPOSTPROCESSOR_FREQ:
                sickrage.AUTOPOSTPROCESSOR_FREQ = sickrage.MIN_AUTOPOSTPROCESSOR_FREQ

            if sickrage.NAMECACHE_FREQ < sickrage.MIN_NAMECACHE_FREQ:
                sickrage.NAMECACHE_FREQ = sickrage.MIN_NAMECACHE_FREQ

            if sickrage.DAILY_SEARCHER_FREQ < sickrage.MIN_DAILY_SEARCHER_FREQ:
                sickrage.DAILY_SEARCHER_FREQ = sickrage.MIN_DAILY_SEARCHER_FREQ

            sickrage.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
            if sickrage.BACKLOG_SEARCHER_FREQ < sickrage.MIN_BACKLOG_SEARCHER_FREQ:
                sickrage.BACKLOG_SEARCHER_FREQ = sickrage.MIN_BACKLOG_SEARCHER_FREQ

            if sickrage.VERSION_UPDATER_FREQ < sickrage.MIN_VERSION_UPDATER_FREQ:
                sickrage.VERSION_UPDATER_FREQ = sickrage.MIN_VERSION_UPDATER_FREQ

            if sickrage.SHOWUPDATE_HOUR > 23:
                sickrage.SHOWUPDATE_HOUR = 0
            elif sickrage.SHOWUPDATE_HOUR < 0:
                sickrage.SHOWUPDATE_HOUR = 0

            if sickrage.SUBTITLE_SEARCHER_FREQ < sickrage.MIN_SUBTITLE_SEARCHER_FREQ:
                sickrage.SUBTITLE_SEARCHER_FREQ = sickrage.MIN_SUBTITLE_SEARCHER_FREQ

            sickrage.NEWS_LATEST = sickrage.NEWS_LAST_READ

            if sickrage.SUBTITLES_LANGUAGES[0] == '':
                sickrage.SUBTITLES_LANGUAGES = []

            sickrage.TIME_PRESET = sickrage.TIME_PRESET_W_SECONDS.replace(
                ":%S", "")

            # initialize metadata_providers
            sickrage.metadataProvideDict = get_metadata_generator_dict()
            for cur_metadata_tuple in [
                (sickrage.METADATA_KODI, kodi),
                (sickrage.METADATA_KODI_12PLUS, kodi_12plus),
                (sickrage.METADATA_MEDIABROWSER, mediabrowser),
                (sickrage.METADATA_PS3, ps3), (sickrage.METADATA_WDTV, wdtv),
                (sickrage.METADATA_TIVO, tivo),
                (sickrage.METADATA_MEDE8ER, mede8er)
            ]:
                (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
                tmp_provider = cur_metadata_class.metadata_class()
                tmp_provider.set_config(cur_metadata_config)

                sickrage.metadataProvideDict[tmp_provider.name] = tmp_provider

            # init caches
            sickrage.NAMECACHE = nameCache()

            # init queues
            sickrage.SHOWUPDATER = ShowUpdater()
            sickrage.SHOWQUEUE = ShowQueue()
            sickrage.SEARCHQUEUE = SearchQueue()

            # load data for shows from database
            sickrage.showList = load_shows()

            # init searchers
            sickrage.DAILYSEARCHER = DailySearcher()
            sickrage.BACKLOGSEARCHER = BacklogSearcher()
            sickrage.PROPERSEARCHER = ProperSearcher()
            sickrage.TRAKTSEARCHER = TraktSearcher()
            sickrage.SUBTITLESEARCHER = SubtitleSearcher()

            # init scheduler
            sickrage.Scheduler = Scheduler()

            # add version checker job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.VERSIONUPDATER.run,
                SRIntervalTrigger(
                    **{
                        'hours': sickrage.VERSION_UPDATER_FREQ,
                        'min': sickrage.MIN_VERSION_UPDATER_FREQ
                    }),
                name="VERSIONUPDATER",
                id="VERSIONUPDATER",
                replace_existing=True)

            # add network timezones updater job to scheduler
            sickrage.Scheduler.add_job(update_network_dict,
                                       SRIntervalTrigger(**{'days': 1}),
                                       name="TZUPDATER",
                                       id="TZUPDATER",
                                       replace_existing=True)

            # add namecache updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.NAMECACHE.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.NAMECACHE_FREQ,
                        'min': sickrage.MIN_NAMECACHE_FREQ
                    }),
                name="NAMECACHE",
                id="NAMECACHE",
                replace_existing=True)

            # add show queue job to scheduler
            sickrage.Scheduler.add_job(sickrage.SHOWQUEUE.run,
                                       SRIntervalTrigger(**{'seconds': 3}),
                                       name="SHOWQUEUE",
                                       id="SHOWQUEUE",
                                       replace_existing=True)

            # add search queue job to scheduler
            sickrage.Scheduler.add_job(sickrage.SEARCHQUEUE.run,
                                       SRIntervalTrigger(**{'seconds': 1}),
                                       name="SEARCHQUEUE",
                                       id="SEARCHQUEUE",
                                       replace_existing=True)

            # add show updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SHOWUPDATER.run,
                SRIntervalTrigger(
                    **{
                        'hours':
                        1,
                        'start_date':
                        datetime.datetime.now().replace(
                            hour=sickrage.SHOWUPDATE_HOUR)
                    }),
                name="SHOWUPDATER",
                id="SHOWUPDATER",
                replace_existing=True)

            # add daily search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.DAILYSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.DAILY_SEARCHER_FREQ,
                        'min': sickrage.MIN_DAILY_SEARCHER_FREQ
                    }),
                name="DAILYSEARCHER",
                id="DAILYSEARCHER",
                replace_existing=True)

            # add backlog search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.BACKLOGSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.BACKLOG_SEARCHER_FREQ,
                        'min': sickrage.MIN_BACKLOG_SEARCHER_FREQ
                    }),
                name="BACKLOG",
                id="BACKLOG",
                replace_existing=True)

            # add auto-postprocessing job to scheduler
            job = sickrage.Scheduler.add_job(
                auto_postprocessor.PostProcessor().run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.AUTOPOSTPROCESSOR_FREQ,
                        'min': sickrage.MIN_AUTOPOSTPROCESSOR_FREQ
                    }),
                name="POSTPROCESSOR",
                id="POSTPROCESSOR",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.PROCESS_AUTOMATICALLY]()

            # add find propers job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.PROPERSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': {
                            '15m': 15,
                            '45m': 45,
                            '90m': 90,
                            '4h': 4 * 60,
                            'daily': 24 * 60
                        }[sickrage.PROPER_SEARCHER_INTERVAL]
                    }),
                name="PROPERSEARCHER",
                id="PROPERSEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.DOWNLOAD_PROPERS]()

            # add trakt.tv checker job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.TRAKTSEARCHER.run,
                SRIntervalTrigger(**{'hours': 1}),
                name="TRAKTSEARCHER",
                id="TRAKTSEARCHER",
                replace_existing=True,
            )
            (job.pause, job.resume)[sickrage.USE_TRAKT]()

            # add subtitles finder job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.SUBTITLESEARCHER.run,
                SRIntervalTrigger(
                    **{'hours': sickrage.SUBTITLE_SEARCHER_FREQ}),
                name="SUBTITLESEARCHER",
                id="SUBTITLESEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.USE_SUBTITLES]()

            # initialize web server
            sickrage.WEB_SERVER = SRWebServer(
                **{
                    'port':
                    int(sickrage.WEB_PORT),
                    'host':
                    sickrage.WEB_HOST,
                    'data_root':
                    sickrage.DATA_DIR,
                    'gui_root':
                    sickrage.GUI_DIR,
                    'web_root':
                    sickrage.WEB_ROOT,
                    'log_dir':
                    sickrage.WEB_LOG or sickrage.LOG_DIR,
                    'username':
                    sickrage.WEB_USERNAME,
                    'password':
                    sickrage.WEB_PASSWORD,
                    'enable_https':
                    sickrage.ENABLE_HTTPS,
                    'handle_reverse_proxy':
                    sickrage.HANDLE_REVERSE_PROXY,
                    'https_cert':
                    os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_CERT),
                    'https_key':
                    os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_KEY),
                    'daemonize':
                    sickrage.DAEMONIZE,
                    'pidfile':
                    sickrage.PIDFILE,
                    'stop_timeout':
                    3,
                    'nolaunch':
                    sickrage.WEB_NOLAUNCH
                })

            sickrage.LOGGER.info("SiCKRAGE VERSION:[{}] CONFIG:[{}]".format(
                sickrage.VERSION, sickrage.CONFIG_FILE))
            sickrage.INITIALIZED = True
            return True
Code Example #24
File: scene_exceptions.py Project: becian/SickRage-1
def retrieve_exceptions():
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """

    for indexer in sickrage.INDEXER_API().indexers:
        if shouldRefresh(sickrage.INDEXER_API(indexer).name):
            sickrage.LOGGER.info("Checking for scene exception updates for " +
                                 sickrage.INDEXER_API(indexer).name)

            loc = sickrage.INDEXER_API(indexer).config[b'scene_loc']
            try:
                data = getURL(loc,
                              session=sickrage.INDEXER_API(indexer).session)
            except Exception:
                continue

            if data is None:
                # When data is None, trouble connecting to github, or reading file failed
                sickrage.LOGGER.debug(
                    "Check scene exceptions update failed. Unable to update from: "
                    + loc)
                continue

            setLastRefresh(sickrage.INDEXER_API(indexer).name)

            # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
            for cur_line in data.splitlines():
                indexer_id, _, aliases = cur_line.partition(
                    ':')  # @UnusedVariable

                if not aliases:
                    continue

                indexer_id = int(indexer_id)

                # regex out the list of shows, taking \' into account
                # alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                alias_list = [{
                    re.sub(r'\\(.)', r'\1', x): -1
                } for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                exception_dict[indexer_id] = alias_list
                del alias_list

            # cleanup
            del data

    # XEM scene exceptions
    _xem_exceptions_fetcher()
    for xem_ex in xem_exception_dict:
        if xem_ex in exception_dict:
            exception_dict[xem_ex] = exception_dict[xem_ex] + xem_exception_dict[xem_ex]
        else:
            exception_dict[xem_ex] = xem_exception_dict[xem_ex]

    # AniDB scene exceptions
    _anidb_exceptions_fetcher()
    for anidb_ex in anidb_exception_dict:
        if anidb_ex in exception_dict:
            exception_dict[anidb_ex] = exception_dict[anidb_ex] + anidb_exception_dict[anidb_ex]
        else:
            exception_dict[anidb_ex] = anidb_exception_dict[anidb_ex]

    queries = []
    for cur_indexer_id in exception_dict:
        sql_ex = cache_db.CacheDB().select(
            "SELECT * FROM scene_exceptions WHERE indexer_id = ?;",
            [cur_indexer_id])
        existing_exceptions = [x[b"show_name"] for x in sql_ex]
        if cur_indexer_id not in exception_dict:
            continue

        for cur_exception_dict in exception_dict[cur_indexer_id]:
            for ex in cur_exception_dict.iteritems():
                cur_exception, curSeason = ex
                if cur_exception not in existing_exceptions:
                    queries.append([
                        "INSERT OR IGNORE INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?);",
                        [cur_indexer_id, cur_exception, curSeason]
                    ])
    if queries:
        cache_db.CacheDB().mass_action(queries)
        sickrage.LOGGER.debug("Updated scene exceptions")
    else:
        sickrage.LOGGER.debug("No scene exceptions update needed")

    # cleanup
    exception_dict.clear()
    anidb_exception_dict.clear()
    xem_exception_dict.clear()
コード例 #25
0
ファイル: scene_exceptions.py プロジェクト: becian/SickRage-1
def getSceneSeasons(indexer_id):
    """get a list of season numbers that have scene exceptions"""
    seasons = cache_db.CacheDB().select(
        "SELECT DISTINCT season FROM scene_exceptions WHERE indexer_id = ?",
        [indexer_id])
    return [cur_exception[b"season"] for cur_exception in seasons]
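
A self-contained sqlite3 equivalent of this query, showing what the DISTINCT filter yields (the data is made up):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE scene_exceptions (indexer_id INTEGER, show_name TEXT, season INTEGER)')
conn.executemany('INSERT INTO scene_exceptions VALUES (?,?,?)',
                 [(1, 'Alias A', -1), (1, 'Alias B', 1), (1, 'Alias C', 1)])

seasons = [row[0] for row in conn.execute(
    'SELECT DISTINCT season FROM scene_exceptions WHERE indexer_id = ?', (1,))]
print(seasons)  # [-1, 1]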