Example #1
class DBBasicTests(SiCKRAGETestDBCase):
    def setUp(self, **kwargs):
        super(DBBasicTests, self).setUp()
        self.db = MainDB()

    def test_select(self):
        self.db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
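A minimal sketch of running this test case directly with the standard unittest runner, assuming SiCKRAGETestDBCase (from the SiCKRAGE test helpers) handles app setup and teardown:

import unittest

if __name__ == '__main__':
    # Collect and run only DBBasicTests; verbosity=2 prints each test name.
    suite = unittest.TestLoader().loadTestsFromTestCase(DBBasicTests)
    unittest.TextTestRunner(verbosity=2).run(suite)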
Example #2
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop.instance()
        self.pid = os.getpid()

        self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.sys_encoding = get_sys_encoding()
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.showlist = []

        self.api = None
        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = None
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.failed_db = None
        self.scheduler = None
        self.wserver = None
        self.wsession = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None

        # patch modules with encoding kludge
        patch_modules()

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = BackgroundScheduler()
        self.wserver = WebServer()
        self.wsession = WebSession()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()


        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                                 os.path.join(self.data_dir, '{}.bak-{}'
                                              .format('sickrage.db',
                                                      datetime.datetime.now().strftime(
                                                          '%Y%m%d_%H%M%S'))))

            helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                             os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages and self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq
            ),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER"
        )

        # add network timezones updater job
        self.scheduler.add_job(
            update_network_dict,
            IntervalTrigger(
                days=1
            ),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name="SHOWUPDATER",
            id="SHOWUPDATER"
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER"
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name="BACKLOG",
            id="BACKLOG"
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR"
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                    self.config.proper_searcher_interval]
            ),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER"
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name="TRAKTSEARCHER",
            id="TRAKTSEARCHER"
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER"
        )

        # start scheduler service
        self.scheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.scheduler.get_job('PROPERSEARCHER').pause,
         self.scheduler.get_job('PROPERSEARCHER').resume
         )[self.config.download_propers]()
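        # NOTE: the (job.pause, job.resume)[flag]() idiom above indexes a tuple
        # with a boolean, so it calls resume() when the config flag is truthy
        # and pause() otherwise; the same pattern repeats for the jobs below.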

        # Pause/Resume TRAKTSEARCHER job
        (self.scheduler.get_job('TRAKTSEARCHER').pause,
         self.scheduler.get_job('TRAKTSEARCHER').resume
         )[self.config.use_trakt]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.scheduler.get_job('SUBTITLESEARCHER').pause,
         self.scheduler.get_job('SUBTITLESEARCHER').resume
         )[self.config.use_subtitles]()

        # Pause/Resume POSTPROCESS job
        (self.scheduler.get_job('POSTPROCESSOR').pause,
         self.scheduler.get_job('POSTPROCESSOR').resume
         )[self.config.process_automatically]()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db, self.failed_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for dbData in [x['doc'] for x in self.main_db.db.all('tv_shows', with_doc=True)]:
            try:
                self.log.debug("Loading data for show: [{}]".format(dbData['show_name']))
                show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))
                show.nextEpisode()
                self.showlist += [show]
            except Exception as e:
                self.log.error("Show error in [%s]: %s" % (dbData['location'], e.message))
Example #3
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = None
        self.pid = os.getpid()
        self.showlist = []

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
                                     1: [" - " + x for x in self.naming_ep_type],
                                     2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.event_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: {}!".format(("FAILED", "SUCCESSFUL")[success]))
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(
                days=1,
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(
                minutes=15,
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(
                days=1,
            ),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(('http', 'https')[sickrage.app.config.enable_https],
                                                               sickrage.app.config.web_host,
                                                               sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # shutdown event queue
            if self.event_queue:
                self.log.debug("Shutting down event queue")
                self.event_queue.shutdown()
                del self.event_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        if self.io_loop:
            self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.save_to_db()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist and quicksearch cache with shows and episodes from the database
        """

        self.quicksearch_cache.load()

        for dbData in self.main_db.all('tv_shows'):
            show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))

            try:
                self.log.debug("Loading data for show: [{}]".format(show.name))
                self.showlist.append(show)
                self.quicksearch_cache.add_show(show.indexerid)
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" % (show.location, str(e)))
Example #4
    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: {}!".format(("FAILED", "SUCCESSFUL")[success]))
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(
                days=1,
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(
                minutes=15,
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(
                days=1,
            ),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(('http', 'https')[sickrage.app.config.enable_https],
                                                               sickrage.app.config.web_host,
                                                               sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()
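Every add_job call above repeats the same shape; a condensed helper sketch (hypothetical, not part of the original code), assuming APScheduler's IntervalTrigger:

from apscheduler.triggers.interval import IntervalTrigger

def add_interval_job(scheduler, runner, **interval_kwargs):
    # Register runner.run under the runner's own name, as the examples do.
    scheduler.add_job(
        runner.run,
        IntervalTrigger(**interval_kwargs),
        name=runner.name,
        id=runner.name
    )

# e.g.: add_interval_job(self.scheduler, self.tz_updater, days=1)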
Example #5
def map_indexers(indexer, indexer_id, name):
    session = sickrage.app.main_db.session()

    mapped = {}

    # init mapped indexers object
    for mindexer in IndexerApi().indexers:
        mapped[mindexer] = indexer_id if int(mindexer) == int(indexer) else 0

    # for each mapped entry
    for dbData in session.query(MainDB.IndexerMapping).filter_by(
            indexer_id=indexer_id, indexer=indexer):
        # Check if its mapped with both tvdb and tvrage.
        if len([i for i in dbData if i is not None]) >= 4:
            sickrage.app.log.debug(
                "Found indexer mapping in cache for show: " + name)
            mapped[int(dbData.mindexer)] = int(dbData.mindexer_id)
            return mapped
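    # for/else: this else clause belongs to the for loop above and runs only
    # when the loop completes without returning a cached mapping.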
    else:
        for mindexer in IndexerApi().indexers:
            if mindexer == indexer:
                mapped[mindexer] = indexer_id
                continue

            indexer_api_parms = IndexerApi(mindexer).api_params.copy()
            indexer_api_parms['custom_ui'] = ShowListUI
            t = IndexerApi(mindexer).indexer(**indexer_api_parms)

            try:
                mapped_show = t[name]
            except Exception:
                sickrage.app.log.debug("Unable to map " +
                                       IndexerApi(indexer).name + "->" +
                                       IndexerApi(mindexer).name +
                                       " for show: " + name + ", skipping it")
                continue

            if mapped_show and len(mapped_show) == 1:
                sickrage.app.log.debug("Mapping " + IndexerApi(indexer).name +
                                       "->" + IndexerApi(mindexer).name +
                                       " for show: " + name)

                mapped[mindexer] = int(mapped_show['id'])

                sickrage.app.log.debug(
                    "Adding indexer mapping to DB for show: " + name)

                try:
                    session.query(MainDB.IndexerMapping).filter_by(
                        indexer_id=indexer_id,
                        indexer=indexer,
                        mindexer_id=int(mapped_show['id'])).one()
                except orm.exc.NoResultFound:
                    session.add(
                        MainDB.IndexerMapping(
                            **{
                                'indexer_id': indexer_id,
                                'indexer': indexer,
                                'mindexer_id': int(mapped_show['id']),
                                'mindexer': mindexer
                            }))
                    session.commit()

    return mapped
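A hypothetical call, assuming indexer id 1 and show id 12345 are valid for the installed IndexerApi providers:

mapped = map_indexers(1, 12345, 'Some Show Name')
# e.g. {1: 12345, 2: 67890} on a successful cross-indexer match,
# with 0 left in place for indexers that could not be mapped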
Example #6
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop()
        self.pid = os.getpid()
        self.showlist = []

        self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = (
            "1x02",
            "s01e02",
            "S01E02",
            "01x02",
            "S01 E02",
        )
        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [
            language for language in os.listdir(sickrage.LOCALE_DIR)
            if '_' in language
        ]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.event_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id='sickrage-app',
            client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.move_file(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:

            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username,
                                          self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r" % e)

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(
                                   hours=self.config.version_updater_freq, ),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(self.tz_updater.run,
                               IntervalTrigger(days=1, ),
                               name=self.tz_updater.name,
                               id=self.tz_updater.name)

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # start webserver
        self.wserver.start()

        # fire off startup events
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # shutdown event queue
            if self.event_queue:
                self.log.debug("Shutting down event queue")
                self.event_queue.shutdown()
                del self.event_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug(
                        "Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.save_to_db()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist and quicksearch cache with shows and episodes from the database
        """

        self.quicksearch_cache.load()

        for dbData in self.main_db.all('tv_shows'):
            show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))

            try:
                self.log.debug("Loading data for show: [{}]".format(show.name))
                self.showlist.append(show)
                self.quicksearch_cache.add_show(show.indexerid)
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" %
                               (show.location, str(e)))
Example #8
    def findSearchResults(self,
                          show,
                          episodes,
                          search_mode,
                          manualSearch=False,
                          downCurQuality=False,
                          cacheOnly=False):

        # _checkAuth is a method; it must be called, otherwise the bound
        # method object is always truthy and this guard never fires
        if not self._checkAuth():
            return

        self.show = show

        results = {}
        itemList = []

        searched_scene_season = None
        for epObj in episodes:
            # search cache for episode result
            cacheResult = self.cache.searchCache(epObj, manualSearch,
                                                 downCurQuality)
            if cacheResult:
                if epObj.episode not in results:
                    results[epObj.episode] = cacheResult[epObj.episode]
                else:
                    results[epObj.episode].extend(cacheResult[epObj.episode])

                # found result, search next episode
                continue

            # skip if season already searched
            if (len(episodes) > 1 and search_mode == 'sponly'
                    and searched_scene_season == epObj.scene_season):
                continue

            # mark season searched for season pack searches so we can skip later on
            searched_scene_season = epObj.scene_season

            # check if this is a cache only search
            if cacheOnly:
                continue

            search_strings = []
            if len(episodes) > 1 and search_mode == 'sponly':
                # get season search results
                search_strings = self._get_season_search_strings(epObj)
            elif search_mode == 'eponly':
                # get single episode search results
                search_strings = self._get_episode_search_strings(epObj)

            first = search_strings and isinstance(
                search_strings[0], dict) and 'rid' in search_strings[0]
            if first:
                sickrage.srCore.srLogger.debug('First search_string has rid')

            for curString in search_strings:
                try:
                    itemList += self.search(curString,
                                            search_mode,
                                            len(episodes),
                                            epObj=epObj)
                except SAXParseException:
                    continue

                if first:
                    first = False
                    if itemList:
                        sickrage.srCore.srLogger.debug(
                            'First search_string had rid, and returned results, skipping query by string'
                        )
                        break
                    else:
                        sickrage.srCore.srLogger.debug(
                            'First search_string had rid, but returned no results, searching with string query'
                        )

        # if we found what we needed already from cache then return results and exit
        if len(results) == len(episodes):
            return results

        # sort list by quality
        if len(itemList):
            items = {}
            itemsUnknown = []
            for item in itemList:
                quality = self.getQuality(item, anime=show.is_anime)
                if quality == Quality.UNKNOWN:
                    itemsUnknown += [item]
                else:
                    if quality not in items:
                        items[quality] = [item]
                    else:
                        items[quality].append(item)

            itemList = list(
                itertools.chain(
                    *[v for (k, v) in sorted(items.items(), reverse=True)]))
            itemList += itemsUnknown or []

        # filter results
        cl = []
        for item in itemList:
            (title, url) = self._get_title_and_url(item)

            # parse the file name
            try:
                myParser = NameParser(False)
                parse_result = myParser.parse(title)
            except InvalidNameException:
                sickrage.srCore.srLogger.debug(
                    "Unable to parse the filename " + title +
                    " into a valid episode")
                continue
            except InvalidShowException:
                sickrage.srCore.srLogger.debug(
                    "Unable to parse the filename " + title +
                    " into a valid show")
                continue

            showObj = parse_result.show
            quality = parse_result.quality
            release_group = parse_result.release_group
            version = parse_result.version

            addCacheEntry = False
            if not (showObj.air_by_date or showObj.sports):
                if search_mode == 'sponly':
                    if len(parse_result.episode_numbers):
                        sickrage.srCore.srLogger.debug(
                            "This is supposed to be a season pack search but the result "
                            + title +
                            " is not a valid season pack, skipping it")
                        addCacheEntry = True
                    if len(parse_result.episode_numbers) and (
                            parse_result.season_number not in set(
                                [ep.season for ep in episodes]) or not [
                                    ep for ep in episodes if ep.scene_episode
                                    in parse_result.episode_numbers
                                ]):
                        sickrage.srCore.srLogger.debug(
                            "The result " + title +
                            " doesn't seem to be a valid episode that we are trying to snatch, ignoring"
                        )
                        addCacheEntry = True
                else:
                    if (not len(parse_result.episode_numbers)
                            and parse_result.season_number and not [
                                ep for ep in episodes
                                if ep.season == parse_result.season_number
                                and ep.episode in parse_result.episode_numbers
                            ]):
                        sickrage.srCore.srLogger.debug(
                            "The result " + title +
                            " doesn't seem to be a valid season that we are trying to snatch, ignoring"
                        )
                        addCacheEntry = True
                    elif len(parse_result.episode_numbers) and not [
                            ep for ep in episodes
                            if ep.season == parse_result.season_number
                            and ep.episode in parse_result.episode_numbers
                    ]:
                        sickrage.srCore.srLogger.debug(
                            "The result " + title +
                            " doesn't seem to be a valid episode that we are trying to snatch, ignoring"
                        )
                        addCacheEntry = True

                if not addCacheEntry:
                    # we just use the existing info for normal searches
                    actual_season = parse_result.season_number
                    actual_episodes = parse_result.episode_numbers
            else:
                if not parse_result.is_air_by_date:
                    sickrage.srCore.srLogger.debug(
                        "This is supposed to be a date search but the result "
                        + title + " didn't parse as one, skipping it")
                    addCacheEntry = True
                else:
                    airdate = parse_result.air_date.toordinal()
                    dbData = [
                        x['doc'] for x in MainDB().db.get_many(
                            'tv_episodes', showObj.indexerid, with_doc=True)
                        if x['doc']['airdate'] == airdate
                    ]

                    if len(dbData) != 1:
                        sickrage.srCore.srLogger.warning(
                            "Tried to look up the date for the episode " +
                            title +
                            " but the database didn't give proper results, skipping it"
                        )
                        addCacheEntry = True

                if not addCacheEntry:
                    actual_season = int(dbData[0]["season"])
                    actual_episodes = [int(dbData[0]["episode"])]

            # add parsed result to cache for usage later on
            if addCacheEntry:
                sickrage.srCore.srLogger.debug(
                    "Adding item from search to cache: " + title)
                ci = self.cache._addCacheEntry(title,
                                               url,
                                               parse_result=parse_result)
                if ci is not None:
                    cl.append(ci)
                continue

            # make sure we want the episode
            wantEp = True
            for epNo in actual_episodes:
                if not showObj.wantEpisode(actual_season, epNo, quality,
                                           manualSearch, downCurQuality):
                    wantEp = False
                    break

            if not wantEp:
                sickrage.srCore.srLogger.info(
                    "RESULT:[{}] QUALITY:[{}] IGNORED!".format(
                        title, Quality.qualityStrings[quality]))
                continue

            # make a result object
            epObjs = []
            for curEp in actual_episodes:
                epObjs.append(showObj.getEpisode(actual_season, curEp))

            result = self.getResult(epObjs)
            result.show = showObj
            result.url = url
            result.name = title
            result.quality = quality
            result.release_group = release_group
            result.version = version
            result.content = None
            result.size = self._get_size(url)
            result.files = self._get_files(url)

            sickrage.srCore.srLogger.debug(
                "FOUND RESULT:[{}] QUALITY:[{}] URL:[{}]".format(
                    title, Quality.qualityStrings[quality], url))

            if len(epObjs) == 1:
                epNum = epObjs[0].episode
                sickrage.srCore.srLogger.debug("Single episode result.")
            elif len(epObjs) > 1:
                epNum = MULTI_EP_RESULT
                sickrage.srCore.srLogger.debug(
                    "Separating multi-episode result to check for later - result contains episodes: "
                    + str(parse_result.episode_numbers))
            elif len(epObjs) == 0:
                epNum = SEASON_RESULT
                sickrage.srCore.srLogger.debug(
                    "Separating full season result to check for later")

            if epNum not in results:
                results[epNum] = [result]
            else:
                results[epNum].append(result)

        # check if we have items to add to cache
        if len(cl) > 0:
            self.cache.ProviderDB().mass_action(cl)
            del cl  # cleanup

        return results
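
The results mapping built above is keyed by episode number, with MULTI_EP_RESULT and SEASON_RESULT acting as sentinel keys for multi-episode and full-season hits. A minimal consuming sketch, assuming the same two module-level constants used above are in scope (split_results itself is a hypothetical helper, not part of SiCKRAGE):

def split_results(results):
    # separate per-episode hits from the sentinel buckets that
    # findSearchResults files multi-episode and season results under
    season_hits = results.get(SEASON_RESULT, [])
    multi_hits = results.get(MULTI_EP_RESULT, [])
    single_hits = dict((ep, items) for ep, items in results.items()
                       if ep not in (SEASON_RESULT, MULTI_EP_RESULT))
    return single_hits, multi_hits, season_hits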
Example #9
    def load_imdb_info(self):
        imdb_info_mapper = {
            'imdbvotes': 'votes',
            'imdbrating': 'rating',
            'totalseasons': 'seasons',
            'imdbid': 'imdb_id'
        }

        if not self.imdb_id:
            try:
                resp = IMDbAPI().search_by_imdb_title(self.name)
            except APIError as e:
                sickrage.app.log.error('{!r}'.format(e))
                resp = {}

            for x in resp.get('Search', []):
                try:
                    # the default belongs to dict.get, not to int's base arg
                    if int(x.get('Year', 0)) == self.startyear and x.get(
                            'Title') in self.name:
                        self.imdb_id = x.get('imdbID')
                        object_session(self).safe_commit()
                        break
                except Exception:
                    continue

        if self.imdb_id:
            sickrage.app.log.debug(
                str(self.indexer_id) + ": Obtaining IMDb info")

            try:
                imdb_info = IMDbAPI().search_by_imdb_id(self.imdb_id)
            except APIError as e:
                imdb_info = None
                sickrage.app.log.error('{!r}'.format(e))

            if not imdb_info:
                sickrage.app.log.debug(
                    str(self.indexer_id) + ': Unable to obtain IMDb info')
                return

            imdb_info = dict((k.lower(), v) for k, v in imdb_info.items())
            for column in imdb_info.copy():
                if column in imdb_info_mapper:
                    imdb_info[imdb_info_mapper[column]] = imdb_info[column]

                if column not in MainDB.IMDbInfo.__table__.columns.keys():
                    del imdb_info[column]

            if not all([
                    imdb_info.get('imdb_id'),
                    imdb_info.get('votes'),
                    imdb_info.get('rating'),
                    imdb_info.get('genre')
            ]):
                sickrage.app.log.debug(
                    str(self.indexer_id) +
                    ': IMDb info obtained does not meet our requirements')
                return

            sickrage.app.log.debug(
                str(self.indexer_id) + ": Obtained IMDb info ->" +
                str(imdb_info))

            # save imdb info to database
            imdb_info.update({
                'indexer_id': self.indexer_id,
                'last_update': datetime.date.today().toordinal()
            })

            try:
                dbData = object_session(self).query(MainDB.IMDbInfo).filter_by(
                    indexer_id=self.indexer_id).one()
                dbData.update(**imdb_info)
            except orm.exc.NoResultFound:
                object_session(self).add(MainDB.IMDbInfo(**imdb_info))
            finally:
                object_session(self).safe_commit()
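
The imdb_info_mapper dict above renames OMDb-style keys onto MainDB.IMDbInfo column names before unknown keys are pruned. The same rename-then-prune pattern as a standalone sketch (remap_keys is a hypothetical name):

def remap_keys(data, mapper, allowed):
    # copy values under their mapped names, then drop anything that
    # is not an allowed column -- same pattern as load_imdb_info above
    out = dict(data)
    for key in list(out):
        if key in mapper:
            out[mapper[key]] = out[key]
        if key not in allowed:
            del out[key]
    return out

remap_keys({'imdbrating': '8.1', 'junk': 1}, {'imdbrating': 'rating'}, {'rating'})
# -> {'rating': '8.1'}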
Example #10
    def run(self, force=False):
        if self.amActive:
            return

        self.amActive = True

        # set thread name
        threading.currentThread().setName(self.name)

        if len(getEnabledServiceList()) < 1:
            sickrage.srCore.srLogger.warning(
                'Not enough services selected. At least 1 service is required to search subtitles in the background'
            )
            return

        sickrage.srCore.srLogger.info('Checking for subtitles')

        # get episodes on which we want subtitles
        # criteria is:
        #  - show subtitles = 1
        #  - episode subtitles != config wanted languages or 'und' (depends on config multi)
        #  - search count < 2 and diff(airdate, now) > 1 week : now -> 1d
        #  - search count < 7 and diff(airdate, now) <= 1 week : now -> 4h -> 8h -> 16h -> 1d -> 1d -> 1d

        today = datetime.date.today().toordinal()

        results = []
        for s in [s['doc'] for s in MainDB().db.all('tv_shows', with_doc=True)]:
            for e in [e['doc'] for e in MainDB().db.get_many('tv_episodes', s['indexer_id'], with_doc=True)
                      if s['subtitles'] == 1
                      and e['doc']['location'] != ''
                      and e['doc']['subtitles'] not in wantedLanguages()
                      and (e['doc']['subtitles_searchcount'] <= 2 or (
                                e['doc']['subtitles_searchcount'] <= 7 and (today - e['doc']['airdate']) <= 7))]:
                results += [{
                    'show_name': s['show_name'],
                    'showid': e['showid'],
                    'season': e['season'],
                    'episode': e['episode'],
                    'status': e['status'],
                    'subtitles': e['subtitles'],
                    'searchcount': e['subtitles_searchcount'],
                    'lastsearch': e['subtitles_lastsearch'],
                    'location': e['location'],
                    'airdate_daydiff': (today - e['airdate'])
                }]

        if len(results) == 0:
            sickrage.srCore.srLogger.info('No subtitles to download')
            return

        rules = self._getRules()
        now = datetime.datetime.now()
        for epToSub in results:
            if not os.path.isfile(epToSub['location']):
                sickrage.srCore.srLogger.debug(
                    'Episode file does not exist, cannot download subtitles for episode %dx%d of show %s' % (
                        epToSub['season'], epToSub['episode'], epToSub['show_name']))
                continue

            # http://bugs.python.org/issue7980#msg221094
            # probably not needed here, but keeping it to be safe
            datetime.datetime.strptime('20110101', '%Y%m%d')

            # retry schedule: episodes older than a week follow the 'old'
            # rules, recently aired ones follow the 'new' rules
            last_search = datetime.datetime.strptime(epToSub['lastsearch'], dateTimeFormat)
            if ((epToSub['airdate_daydiff'] > 7 and epToSub['searchcount'] < 2
                 and now - last_search > datetime.timedelta(hours=rules['old'][epToSub['searchcount']]))
                    or (epToSub['airdate_daydiff'] <= 7 and epToSub['searchcount'] < 7
                        and now - last_search > datetime.timedelta(hours=rules['new'][epToSub['searchcount']]))):

                sickrage.srCore.srLogger.debug('Downloading subtitles for episode %dx%d of show %s' % (
                    epToSub['season'], epToSub['episode'], epToSub['show_name']))

                showObj = findCertainShow(sickrage.srCore.SHOWLIST, int(epToSub['showid']))
                if not showObj:
                    sickrage.srCore.srLogger.debug('Show not found')
                    return

                epObj = showObj.getEpisode(int(epToSub["season"]), int(epToSub["episode"]))
                if isinstance(epObj, str):
                    sickrage.srCore.srLogger.debug('Episode not found')
                    return

                existing_subtitles = epObj.subtitles

                try:
                    epObj.downloadSubtitles()
                except Exception as e:
                    sickrage.srCore.srLogger.debug('Unable to find subtitles')
                    sickrage.srCore.srLogger.debug(str(e))
                    return

                newSubtitles = frozenset(epObj.subtitles).difference(existing_subtitles)
                if newSubtitles:
                    sickrage.srCore.srLogger.info('Downloaded subtitles for S%02dE%02d in %s' % (
                        epToSub["season"], epToSub["episode"], ', '.join(newSubtitles)))

        self.amActive = False
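
The backoff above indexes the rule tables from _getRules() by the episode's current search count. A plausible shape for those tables, derived only from the criteria comments at the top of run() (the actual values in SiCKRAGE may differ):

    def _getRules(self):
        # hours to wait since the last search before retrying, indexed
        # by searchcount; values are an assumption taken from the
        # schedule in the comments above: new -> 4h, 8h, 16h, then 1d
        # steps; old -> 1d
        return {
            'old': [0, 24],
            'new': [0, 4, 8, 16, 24, 24, 24],
        }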
Example #12
def validateDir(path, dirName, nzbNameOriginal, failed, result):
    """
    Check if directory is valid for processing

    :param path: Path to use
    :param dirName: Directory to check
    :param nzbNameOriginal: Original NZB name
    :param failed: Previously failed objects
    :param result: Previous results
    :return: True if dir is valid for processing, False if not
    """

    IGNORED_FOLDERS = ['.AppleDouble', '.@__thumb', '@eaDir']

    folder_name = os.path.basename(dirName)
    if folder_name in IGNORED_FOLDERS:
        return False

    result.output += logHelper("Processing folder " + dirName,
                               sickrage.srCore.srLogger.DEBUG)

    if folder_name.startswith('_FAILED_'):
        result.output += logHelper(
            "The directory name indicates it failed to extract.",
            sickrage.srCore.srLogger.DEBUG)
        failed = True
    elif folder_name.startswith('_UNDERSIZED_'):
        result.output += logHelper(
            "The directory name indicates that it was previously rejected for being undersized.",
            sickrage.srCore.srLogger.DEBUG)
        failed = True
    elif folder_name.upper().startswith('_UNPACK'):
        result.output += logHelper(
            "The directory name indicates that this release is in the process of being unpacked.",
            sickrage.srCore.srLogger.DEBUG)
        result.missedfiles.append(dirName + " : Being unpacked")
        return False

    if failed:
        process_failed(os.path.join(path, dirName), nzbNameOriginal, result)
        result.missedfiles.append(dirName + " : Failed download")
        return False

    if is_hidden_folder(os.path.join(path, dirName)):
        result.output += logHelper("Ignoring hidden folder: " + dirName,
                                   sickrage.srCore.srLogger.DEBUG)
        result.missedfiles.append(dirName + " : Hidden folder")
        return False

    # make sure the dir isn't inside a show dir
    for dbData in [
            x['doc'] for x in MainDB().db.all('tv_shows', with_doc=True)
    ]:
        show_location = os.path.realpath(dbData["location"]).lower()
        if dirName.lower().startswith(show_location + os.sep) or \
                dirName.lower() == show_location:
            result.output += logHelper(
                "Cannot process an episode that's already been moved to its show dir, skipping "
                + dirName, sickrage.srCore.srLogger.WARNING)
            return False

    # Get the videofile list for the next checks
    allFiles = []
    allDirs = []
    for _, processdir, fileList in os.walk(os.path.join(path, dirName),
                                           topdown=False):
        allDirs += processdir
        allFiles += fileList

    videoFiles = [x for x in allFiles if isMediaFile(x)]
    allDirs.append(dirName)

    # check if the dir has at least one tv video file
    for video in videoFiles:
        try:
            NameParser().parse(video, cache_result=False)
            return True
        except (InvalidNameException, InvalidShowException):
            pass

    for proc_dir in allDirs:
        try:
            NameParser().parse(proc_dir, cache_result=False)
            return True
        except (InvalidNameException, InvalidShowException):
            pass

    if sickrage.srCore.srConfig.UNPACK:
        # Search for packed release
        packedFiles = [x for x in allFiles if isRarFile(x)]

        for packed in packedFiles:
            try:
                NameParser().parse(packed, cache_result=False)
                return True
            except (InvalidNameException, InvalidShowException):
                pass

    result.output += logHelper(
        dirName + " : No processable items found in folder",
        sickrage.srCore.srLogger.DEBUG)
    return False
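
A usage sketch for validateDir; the result object only needs the output and missedfiles attributes the function writes to, so the ProcessResult stand-in here is hypothetical:

class ProcessResult(object):
    def __init__(self):
        # the two attributes validateDir appends to
        self.output = ''
        self.missedfiles = []

result = ProcessResult()
if validateDir('/downloads', 'Show.Name.S01E01.720p-GRP', None, False, result):
    print('folder is OK to post-process')
else:
    print('skipped: %s' % ', '.join(result.missedfiles))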
Example #14
class Core(object):
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = notifiersDict()

        # generate metadata providers dict
        self.metadataProvidersDict = metadataProvidersDict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = BackgroundScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

        self.USER_AGENT = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))

        self.SYS_ENCODING = get_sys_encoding()

        # patch modules with encoding kludge
        patch_modules()

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.logFile = self.srConfig.LOG_FILE
        self.srLogger.debugLogging = self.srConfig.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE

        # start logger
        self.srLogger.start()

        # user agent
        if self.srConfig.RANDOM_USER_AGENT:
            self.USER_AGENT = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.USER_AGENT

        # Check available space
        try:
            total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
            if available_space < 100:
                self.srLogger.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                return
        except Exception:
            self.srLogger.error('Failed getting disk space: %s',
                                traceback.format_exc())

        # perform database startup actions
        for db in [self.mainDB, self.cacheDB, self.failedDB]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not self.srConfig.DEVELOPER and self.srConfig.LAST_DB_COMPACT < time.time() - 604800:  # 7 days
            self.mainDB.compact()
            self.srConfig.LAST_DB_COMPACT = int(time.time())

        # load name cache
        self.NAMECACHE.load()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.CACHE_DIR, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:

            def anidb_logger(msg):
                return self.srLogger.debug("AniDB: {} ".format(msg))

            try:
                self.ADBA_CONNECTION = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.ADBA_CONNECTION.auth(self.srConfig.ANIDB_USERNAME,
                                          self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # the anonymous redirect url must end with '?' or links break; discard it otherwise
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = self.BACKLOGSEARCHER.get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(**{
                'days': 1,
                'start_date': datetime.datetime.now().replace(
                    hour=self.srConfig.SHOWUPDATE_HOUR)
            }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add show next episode job
        self.srScheduler.add_job(self.SHOWUPDATER.nextEpisode,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="SHOWNEXTEP",
                                 id="SHOWNEXTEP")

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(**{
                'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ,
                'start_date': datetime.datetime.now() + datetime.timedelta(minutes=4)
            }),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(**{
                'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
                'start_date': datetime.datetime.now() + datetime.timedelta(minutes=30)
            }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()
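        # note: the (pause, resume)[flag]() pairs here reuse the bool-as-index
        # trick: when the config flag is True the job's resume() is called,
        # otherwise its pause() is called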

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start queues
        self.SEARCHQUEUE.start()
        self.SHOWQUEUE.start()

        # start webserver
        self.srWebServer.start()

    def shutdown(self, restart=False):
        if self.started:
            self.srLogger.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            self.srWebServer.shutdown()

            # shutdown scheduler
            if self.srScheduler:
                self.srLogger.debug("Shutting down scheduler")
                self.srScheduler.shutdown()

            # shutdown show queue
            if self.SHOWQUEUE:
                self.srLogger.debug("Shutting down show queue")
                self.SHOWQUEUE.shutdown()
                del self.SHOWQUEUE

            # shutdown search queue
            if self.SEARCHQUEUE:
                self.srLogger.debug("Shutting down search queue")
                self.SEARCHQUEUE.shutdown()
                del self.SEARCHQUEUE

            # log out of ADBA
            if self.ADBA_CONNECTION:
                self.srLogger.debug("Shutting down ANIDB connection")
                self.ADBA_CONNECTION.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.mainDB, self.cacheDB, self.failedDB]:
                if db.opened:
                    self.srLogger.debug(
                        "Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            self.srLogger.close()

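        # note: os.execl() replaces the current process image with a fresh
        # interpreter running the same argv, restarting SiCKRAGE in place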
        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)
        elif sickrage.daemon:
            sickrage.daemon.stop()

        self.started = False

    def save_all(self):
        # write all shows
        self.srLogger.info("Saving all shows to the database")
        for SHOW in self.SHOWLIST:
            try:
                SHOW.saveToDB()
            except:
                continue

        # save config
        self.srConfig.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for dbData in [
                x['doc'] for x in self.mainDB.db.all('tv_shows', with_doc=True)
        ]:
            try:
                self.srLogger.debug("Loading data for show: [%s]",
                                    dbData['show_name'])
                show = TVShow(int(dbData['indexer']),
                              int(dbData['indexer_id']))
                show.nextEpisode()
                self.NAMECACHE.build(show)
                self.SHOWLIST += [show]
            except Exception as e:
                self.srLogger.error("Show error in [%s]: %s" %
                                    (dbData['location'], e.message))
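                # note: Exception.message is a Python 2-ism (deprecated since
                # 2.6 and gone in Python 3); str(e) is the portable equivalent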
Example #15
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop().instance()
        self.pid = os.getpid()

        self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quite = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))
        self.sys_encoding = get_sys_encoding()
        self.languages = [
            language for language in os.listdir(sickrage.LOCALE_DIR)
            if '_' in language
        ]
        self.showlist = []

        self.api = None
        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = None
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.failed_db = None
        self.scheduler = None
        self.wserver = None
        self.wsession = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None

        # patch modules with encoding kludge
        patch_modules()

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = BackgroundScheduler()
        self.wserver = WebServer()
        self.wsession = WebSession()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting diskspace: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history',
                                            'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:

            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username,
                                          self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # the anonymous redirect url must end with '?' or links break; discard it otherwise
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name="SHOWUPDATER",
                               id="SHOWUPDATER")

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER")

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name="TRAKTSEARCHER",
                               id="TRAKTSEARCHER")

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.scheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.scheduler.get_job('PROPERSEARCHER').pause,
         self.scheduler.get_job('PROPERSEARCHER').resume
         )[self.config.download_propers]()

        # Pause/Resume TRAKTSEARCHER job
        (self.scheduler.get_job('TRAKTSEARCHER').pause,
         self.scheduler.get_job('TRAKTSEARCHER').resume
         )[self.config.use_trakt]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.scheduler.get_job('SUBTITLESEARCHER').pause,
         self.scheduler.get_job('SUBTITLESEARCHER').resume
         )[self.config.use_subtitles]()

        # Pause/Resume POSTPROCESS job
        (self.scheduler.get_job('POSTPROCESSOR').pause,
         self.scheduler.get_job('POSTPROCESSOR').resume
         )[self.config.process_automatically]()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db, self.failed_db]:
                if db.opened:
                    self.log.debug(
                        "Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for dbData in [
                x['doc']
                for x in self.main_db.db.all('tv_shows', with_doc=True)
        ]:
            try:
                self.log.debug("Loading data for show: [{}]".format(
                    dbData['show_name']))
                show = TVShow(int(dbData['indexer']),
                              int(dbData['indexer_id']))
                show.nextEpisode()
                self.showlist += [show]
            except Exception as e:
                self.log.error("Show error in [%s]: %s" %
                               (dbData['location'], e.message))
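
Both variants drive their periodic tasks through APScheduler's BackgroundScheduler: each job is registered with add_job() and an interval trigger, then looked up by id and paused or resumed depending on a feature flag. A minimal standalone sketch of that pattern, assuming APScheduler 3.x (the daily_search function and use_daily_search flag are hypothetical stand-ins):

from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger


def daily_search():
    # stand-in for a real task such as DailySearcher.run
    print("searching at {}".format(datetime.now()))


scheduler = BackgroundScheduler()

# register a job that fires every 15 minutes, starting 4 minutes from now
scheduler.add_job(daily_search,
                  IntervalTrigger(minutes=15,
                                  start_date=datetime.now() + timedelta(minutes=4)),
                  name="DAILYSEARCHER",
                  id="DAILYSEARCHER")

scheduler.start()

# pause or resume the registered job behind a config flag, like the
# (pause, resume)[flag]() blocks above, written out as a plain conditional
use_daily_search = True  # hypothetical feature flag
job = scheduler.get_job("DAILYSEARCHER")
if use_daily_search:
    job.resume()
else:
    job.pause()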