Example 1
def is_naming_pattern_valid(pattern=None,
                            multi=None,
                            abd=None,
                            sports=None,
                            anime_type=None):
    if pattern is None:
        return 'invalid'

    if anime_type is not None:
        anime_type = int(anime_type)

    # air-by-date shows need just one check, we don't need to worry about season folders
    if abd:
        is_valid = validator.check_valid_abd_naming(pattern)
        require_season_folders = False

    # sports shows need just one check, we don't need to worry about season folders
    elif sports:
        is_valid = validator.check_valid_sports_naming(pattern)
        require_season_folders = False

    else:
        # check validity of single and multi ep cases for the whole path
        is_valid = validator.check_valid_naming(pattern, multi, anime_type)

        # check validity of single and multi ep cases for only the file name
        require_season_folders = validator.check_force_season_folders(
            pattern, multi, anime_type)

    if is_valid and not require_season_folders:
        return 'valid'
    elif is_valid and require_season_folders:
        return 'seasonfolders'
    else:
        return 'invalid'
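
A minimal usage sketch for the function above (the pattern strings are illustrative SickRage naming tokens and the validator module must be importable; the calls shown here are assumptions, not part of the example):

# hypothetical calls against is_naming_pattern_valid
print(is_naming_pattern_valid())                                       # -> 'invalid' (no pattern given)
print(is_naming_pattern_valid(pattern='%SN - %Sx%0E - %EN', multi=1))  # -> 'valid' or 'seasonfolders'
print(is_naming_pattern_valid(pattern='%SN - %A-D - %EN', abd=True))   # -> air-by-date check only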
Example 2
    def get(self, *args, **kwargs):
        pattern = self.get_argument('pattern', None)
        multi = self.get_argument('multi', None)
        abd = self.get_argument('abd', None)
        sports = self.get_argument('sports', None)
        anime_type = self.get_argument('anime_type', None)

        if pattern is None:
            return self.write('invalid')

        if multi is not None:
            multi = int(multi)

        if anime_type is not None:
            anime_type = int(anime_type)

        # air-by-date shows need just one check, we don't need to worry about season folders
        if abd:
            is_valid = validator.check_valid_abd_naming(pattern)
            require_season_folders = False

        # sports shows need just one check, we don't need to worry about season folders
        elif sports:
            is_valid = validator.check_valid_sports_naming(pattern)
            require_season_folders = False

        else:
            # check validity of single and multi ep cases for the whole path
            is_valid = validator.check_valid_naming(pattern, multi, anime_type)

            # check validity of single and multi ep cases for only the file name
            require_season_folders = validator.check_force_season_folders(pattern, multi, anime_type)

        if is_valid and not require_season_folders:
            return self.write('valid')
        elif is_valid and require_season_folders:
            return self.write('seasonfolders')
        else:
            return self.write('invalid')
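
For context, a handler like this is normally mounted on a Tornado application. A minimal, hypothetical sketch of the wiring (the route and class names are assumptions, not taken from the example):

import tornado.ioloop
import tornado.web

class NamingPatternHandler(tornado.web.RequestHandler):
    def get(self, *args, **kwargs):
        ...  # body as in the example above

app = tornado.web.Application([(r'/isNamingValid', NamingPatternHandler)])
app.listen(8081)
tornado.ioloop.IOLoop.current().start()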
Example 3
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id='sickrage-app',
            client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:

            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username,
                                          self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name=self.version_updater.name,
            id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

        # start ioloop
        self.io_loop.start()
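
The recurring pattern in this start() method is registering periodic jobs on an APScheduler TornadoScheduler with an IntervalTrigger. A stripped-down sketch of that pattern, using a placeholder job function (the real jobs above are bound methods such as self.show_updater.run):

import datetime

from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.triggers.interval import IntervalTrigger

scheduler = TornadoScheduler()

def my_job():  # placeholder job
    print('job ran')

# run hourly, with the first run delayed by 4 minutes, under a stable name/id
scheduler.add_job(my_job,
                  IntervalTrigger(hours=1,
                                  start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)),
                  name='MYJOB', id='MYJOB')
scheduler.start()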
Example 4
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                                 os.path.join(sickrage.DATA_DIR, '{}.bak-{}'
                                              .format('sickrage.db',
                                                      datetime.datetime.now().strftime(
                                                          '%Y%m%d_%H%M%S'))))

            helpers.moveFile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')), os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE
        self.srLogger.logFile = self.srConfig.LOG_FILE

        # start logger
        self.srLogger.start()

        # initialize the main SB database
        main_db.MainDB().InitialSchema().upgrade()

        # initialize the cache database
        cache_db.CacheDB().InitialSchema().upgrade()

        # initialize the failed downloads database
        failed_db.FailedDB().InitialSchema().upgrade()

        # fix up any db problems
        main_db.MainDB().SanityCheck()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=lambda msg: self.srLogger.debug(
                    "AniDB: %s " % msg))
                self.ADBA_CONNECTION.auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent', 'transmission',
                                                'deluge', 'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR < 0 or self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # initialize metadata_providers
        for cur_metadata_tuple in [(self.srConfig.METADATA_KODI, kodi),
                                   (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
                                   (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
                                   (self.srConfig.METADATA_PS3, ps3),
                                   (self.srConfig.METADATA_WDTV, wdtv),
                                   (self.srConfig.METADATA_TIVO, tivo),
                                   (self.srConfig.METADATA_MEDE8ER, mede8er)]:
            (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
            tmp_provider = cur_metadata_class.metadata_class()
            tmp_provider.set_config(cur_metadata_config)

            self.metadataProviderDict[tmp_provider.name] = tmp_provider

        # add show queue job
        self.srScheduler.add_job(
            self.SHOWQUEUE.run,
            srIntervalTrigger(**{'seconds': 5}),
            name="SHOWQUEUE",
            id="SHOWQUEUE"
        )

        # add search queue job
        self.srScheduler.add_job(
            self.SEARCHQUEUE.run,
            srIntervalTrigger(**{'seconds': 5}),
            name="SEARCHQUEUE",
            id="SEARCHQUEUE"
        )

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.VERSION_UPDATER_FREQ, 'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER"
        )

        # add network timezones updater job
        self.srScheduler.add_job(
            update_network_dict,
            srIntervalTrigger(**{'days': 1}),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add namecache updater job
        self.srScheduler.add_job(
            self.NAMECACHE.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.NAMECACHE_FREQ, 'min': self.srConfig.MIN_NAMECACHE_FREQ}),
            name="NAMECACHE",
            id="NAMECACHE"
        )

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{'hours': 1,
                   'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
            name="SHOWUPDATER",
            id="SHOWUPDATER"
        )

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.DAILY_SEARCHER_FREQ, 'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ}),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER"
        )

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                   'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
            name="BACKLOG",
            id="BACKLOG"
        )

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                                 'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR"
        )

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(**{
                'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                    self.srConfig.PROPER_SEARCHER_INTERVAL]}),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER"
        )

        # add trakt.tv checker job
        self.srScheduler.add_job(
            self.TRAKTSEARCHER.run,
            srIntervalTrigger(**{'hours': 1}),
            name="TRAKTSEARCHER",
            id="TRAKTSEARCHER"
        )

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER"
        )

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start webserver
        self.srWebServer.start()

        # start ioloop event handler
        IOLoop.instance().start()
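
The Pause/Resume blocks above rely on a tuple-indexing idiom: (job.pause, job.resume)[flag]() picks index 0 (pause) when the config flag is falsy and index 1 (resume) when truthy, then calls the chosen bound method. A more explicit equivalent of the first block:

        job = self.srScheduler.get_job('PROPERSEARCHER')
        if self.srConfig.DOWNLOAD_PROPERS:
            job.resume()
        else:
            job.pause()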
Example 5
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init sentry
        self.init_sentry()

        # scheduler
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})

        # init core classes
        self.api = API()
        self.config = Config(self.db_type, self.db_prefix, self.db_host,
                             self.db_port, self.db_username, self.db_password)
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host,
                              self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host,
                                self.db_port, self.db_username,
                                self.db_password)
        self.notification_providers = NotificationProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.series_providers = SeriesProviders()
        self.log = Logger()
        self.alerts = Notifications()
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.announcements = Announcements()
        self.amqp_client = AMQPClient()

        # authorization sso client
        self.auth_server = AuthServer()

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            self.log.info('Performing restore of backup files')
            success = restore_app_data(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" %
                          ("FAILED", "SUCCESSFUL")[success])
            if success:
                # remove restore files
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.move_file(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # setup databases
        self.main_db.setup()
        self.config.db.setup()
        self.cache_db.setup()

        # load config
        self.config.load()

        # migrate config
        self.config.migrate_config_file(self.config_file)

        # add server id tag to sentry
        sentry_sdk.set_tag('server_id', self.config.general.server_id)

        # add user to sentry
        sentry_sdk.set_user({
            'id': self.config.user.sub_id,
            'username': self.config.user.username,
            'email': self.config.user.email
        })

        # config overrides
        if self.web_port:
            self.config.general.web_port = self.web_port
        if self.web_root:
            self.config.general.web_root = self.web_root

        # set language
        change_gui_lang(self.config.gui.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.general.socket_timeout)

        # set ssl cert/key filenames
        self.https_cert_file = os.path.abspath(
            os.path.join(self.data_dir, 'server.crt'))
        self.https_key_file = os.path.abspath(
            os.path.join(self.data_dir, 'server.key'))

        # setup logger settings
        self.log.logSize = self.config.general.log_size
        self.log.logNr = self.config.general.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.debug or self.config.general.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.general.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        if self.config.general.default_page not in DefaultHomePage:
            self.config.general.default_page = DefaultHomePage.HOME

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.general.anon_redirect.endswith('?'):
            self.config.general.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*',
                        self.config.general.root_dirs):
            self.config.general.root_dirs = ''

        self.naming_force_folders = check_force_season_folders()

        if self.config.general.nzb_method not in NzbMethod:
            self.config.general.nzb_method = NzbMethod.BLACKHOLE

        if self.config.general.torrent_method not in TorrentMethod:
            self.config.general.torrent_method = TorrentMethod.BLACKHOLE

        if self.config.general.auto_postprocessor_freq < self.min_auto_postprocessor_freq:
            self.config.general.auto_postprocessor_freq = self.min_auto_postprocessor_freq

        if self.config.general.daily_searcher_freq < self.min_daily_searcher_freq:
            self.config.general.daily_searcher_freq = self.min_daily_searcher_freq

        if self.config.general.backlog_searcher_freq < self.min_backlog_searcher_freq:
            self.config.general.backlog_searcher_freq = self.min_backlog_searcher_freq

        if self.config.general.version_updater_freq < self.min_version_updater_freq:
            self.config.general.version_updater_freq = self.min_version_updater_freq

        if self.config.general.subtitle_searcher_freq < self.min_subtitle_searcher_freq:
            self.config.general.subtitle_searcher_freq = self.min_subtitle_searcher_freq

        if self.config.failed_snatches.age < self.min_failed_snatch_age:
            self.config.failed_snatches.age = self.min_failed_snatch_age

        if self.config.general.proper_searcher_interval not in CheckPropersInterval:
            self.config.general.proper_searcher_interval = CheckPropersInterval.DAILY

        if self.config.general.show_update_hour < 0 or self.config.general.show_update_hour > 23:
            self.config.general.show_update_hour = 0

        # add app updater job
        self.scheduler.add_job(
            self.version_updater.task,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.version_updater.name,
            id=self.version_updater.name)

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.task,
            IntervalTrigger(days=1,
                            start_date=datetime.datetime.now().replace(
                                hour=self.config.general.show_update_hour),
                            timezone='utc'),
            name=self.show_updater.name,
            id=self.show_updater.name)

        # add rss cache updater job
        self.scheduler.add_job(self.rsscache_updater.task,
                               IntervalTrigger(minutes=15, timezone='utc'),
                               name=self.rsscache_updater.name,
                               id=self.rsscache_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.task,
            IntervalTrigger(minutes=self.config.general.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.task,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.task,
            IntervalTrigger(minutes=self.config.general.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30),
                            timezone='utc'),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.task,
            IntervalTrigger(
                minutes=self.config.general.auto_postprocessor_freq,
                timezone='utc'),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.task,
            IntervalTrigger(
                minutes=self.config.general.proper_searcher_interval.value,
                timezone='utc'),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.task,
                               IntervalTrigger(hours=1, timezone='utc'),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.task,
            IntervalTrigger(hours=self.config.general.subtitle_searcher_freq,
                            timezone='utc'),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.task,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime,
                            timezone='utc'),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # start queues
        self.search_queue.start_worker(self.config.general.max_queue_workers)
        self.show_queue.start_worker(self.config.general.max_queue_workers)
        self.postprocessor_queue.start_worker(
            self.config.general.max_queue_workers)

        # start web server
        self.wserver.start()

        # start scheduler service
        self.scheduler.start()

        # perform server checkup
        IOLoop.current().add_callback(self.server_checkup)

        # load shows
        IOLoop.current().add_callback(self.load_shows)

        # load network timezones
        IOLoop.current().spawn_callback(
            self.tz_updater.update_network_timezones)

        # load search provider urls
        IOLoop.current().spawn_callback(self.search_providers.update_urls)

        # startup message
        IOLoop.current().add_callback(self.startup_message)

        # launch browser
        IOLoop.current().add_callback(self.launch_browser)

        # perform server checkups every hour
        PeriodicCallback(self.server_checkup, 1 * 60 * 60 * 1000).start()

        # perform shutdown trigger check every 5 seconds
        PeriodicCallback(self.shutdown_trigger, 5 * 1000).start()

        # start ioloop
        IOLoop.current().start()
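
Unlike the earlier examples, Example 5 also drives housekeeping with raw Tornado primitives: add_callback schedules a one-shot call on the loop, spawn_callback fires a coroutine without awaiting it, and PeriodicCallback repeats at a fixed interval given in milliseconds. A minimal sketch with a placeholder task:

from tornado.ioloop import IOLoop, PeriodicCallback

def server_checkup():  # placeholder task
    print('checkup')

IOLoop.current().add_callback(server_checkup)             # run once at startup
PeriodicCallback(server_checkup, 60 * 60 * 1000).start()  # then every hour (interval in ms)
IOLoop.current().start()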
Example 6
    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id=self.oidc_client_id, client_secret=self.oidc_client_secret)

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # init encryption public and private keys
        encryption.initialize()

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # perform integrity check
            db.integrity_check()

            # migrate database
            db.migrate()

            # sync database repo
            db.sync_db_repo()

            # cleanup
            db.cleanup()

        # load name cache
        self.name_cache.load()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add API token refresh job
        self.scheduler.add_job(
            API().refresh_token,
            IntervalTrigger(
                hours=1,
            ),
            name='SR-API',
            id='SR-API'
        )

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(
                days=1,
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(
                minutes=15,
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(
                days=1,
            ),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.io_loop.add_callback(self.search_queue.watch)
        self.io_loop.add_callback(self.show_queue.watch)
        self.io_loop.add_callback(self.postprocessor_queue.watch)

        # fire off startup events
        self.io_loop.run_in_executor(None, self.quicksearch_cache.run)
        self.io_loop.run_in_executor(None, self.name_cache.run)
        self.io_loop.run_in_executor(None, self.version_updater.run)
        self.io_loop.run_in_executor(None, self.tz_updater.run)

        # start web server
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.io_loop.run_in_executor(None, functools.partial(launch_browser, ('http', 'https')[sickrage.app.config.enable_https],
                                                                 sickrage.app.config.web_host, sickrage.app.config.web_port))

        def started():
            self.log.info("SiCKRAGE :: STARTED")
            self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(sickrage.version()))
            self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(self.config.config_version))
            self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(self.main_db.version))
            self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(self.db_type))
            self.log.info("SiCKRAGE :: URL:[{}://{}:{}{}]".format(('http', 'https')[self.config.enable_https], self.config.web_host, self.config.web_port,
                                                                  self.config.web_root))

        # start io_loop
        self.io_loop.add_callback(started)
        self.io_loop.start()
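
Example 6 keeps the event loop responsive by pushing blocking startup tasks onto a thread pool with IOLoop.run_in_executor; passing None selects the default executor. A minimal sketch of that pattern (the task body is a placeholder):

from tornado.ioloop import IOLoop

def blocking_startup_task():
    pass  # e.g. cache building or a version check

io_loop = IOLoop.current()
io_loop.run_in_executor(None, blocking_startup_task)  # None -> default ThreadPoolExecutor
io_loop.start()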
Example 7
def initialize():
    if not sickrage.INITIALIZED:
        with threading.Lock():
            # init encoding
            encodingInit()

            # Check if we need to perform a restore first
            os.chdir(sickrage.DATA_DIR)
            restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
            if os.path.exists(restore_dir):
                success = restoreDB(restore_dir, sickrage.DATA_DIR)
                sickrage.LOGGER.info("Restore: restoring DB and config.ini %s!\n" % ("FAILED", "SUCCESSFUL")[success])

            # init indexerApi
            sickrage.INDEXER_API = indexerApi

            # initialize notifiers
            sickrage.NOTIFIERS = AttrDict(
                    libnotify=LibnotifyNotifier(),
                    kodi_notifier=KODINotifier(),
                    plex_notifier=PLEXNotifier(),
                    emby_notifier=EMBYNotifier(),
                    nmj_notifier=NMJNotifier(),
                    nmjv2_notifier=NMJv2Notifier(),
                    synoindex_notifier=synoIndexNotifier(),
                    synology_notifier=synologyNotifier(),
                    pytivo_notifier=pyTivoNotifier(),
                    growl_notifier=GrowlNotifier(),
                    prowl_notifier=ProwlNotifier(),
                    libnotify_notifier=LibnotifyNotifier(),
                    pushover_notifier=PushoverNotifier(),
                    boxcar_notifier=BoxcarNotifier(),
                    boxcar2_notifier=Boxcar2Notifier(),
                    nma_notifier=NMA_Notifier(),
                    pushalot_notifier=PushalotNotifier(),
                    pushbullet_notifier=PushbulletNotifier(),
                    freemobile_notifier=FreeMobileNotifier(),
                    twitter_notifier=TwitterNotifier(),
                    trakt_notifier=TraktNotifier(),
                    email_notifier=EmailNotifier(),
            )

            sickrage.NAMING_EP_TYPE = ("%(seasonnumber)dx%(episodenumber)02d",
                                       "s%(seasonnumber)02de%(episodenumber)02d",
                                       "S%(seasonnumber)02dE%(episodenumber)02d",
                                       "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.SPORTS_EP_TYPE = ("%(seasonnumber)dx%(episodenumber)02d",
                                       "s%(seasonnumber)02de%(episodenumber)02d",
                                       "S%(seasonnumber)02dE%(episodenumber)02d",
                                       "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.NAMING_EP_TYPE_TEXT = ("1x02", "s01e02", "S01E02", "01x02")
            sickrage.NAMING_MULTI_EP_TYPE = {0: ["-%(episodenumber)02d"] * len(sickrage.NAMING_EP_TYPE),
                                             1: [" - " + x for x in sickrage.NAMING_EP_TYPE],
                                             2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}

            sickrage.NAMING_MULTI_EP_TYPE_TEXT = ("extend", "duplicate", "repeat")
            sickrage.NAMING_SEP_TYPE = (" - ", " ")
            sickrage.NAMING_SEP_TYPE_TEXT = (" - ", "space")

            # migrate old database filenames to new ones
            if not os.path.exists(main_db.MainDB().filename) and os.path.exists("sickbeard.db"):
                helpers.moveFile("sickbeard.db", main_db.MainDB().filename)

            # init config file
            srConfig.load_config(sickrage.CONFIG_FILE, True)

            # set socket timeout
            socket.setdefaulttimeout(sickrage.SOCKET_TIMEOUT)

            # init logger
            sickrage.LOGGER = sickrage.LOGGER.__class__(logFile=sickrage.LOG_FILE,
                                                        logSize=sickrage.LOG_SIZE,
                                                        logNr=sickrage.LOG_NR,
                                                        fileLogging=makeDir(sickrage.LOG_DIR),
                                                        debugLogging=sickrage.DEBUG)

            # init updater and get current version
            sickrage.VERSIONUPDATER = VersionUpdater()
            sickrage.VERSION = sickrage.VERSIONUPDATER.updater.get_cur_version

            # initialize the main SB database
            main_db.MainDB().InitialSchema().upgrade()

            # initialize the cache database
            cache_db.CacheDB().InitialSchema().upgrade()

            # initialize the failed downloads database
            failed_db.FailedDB().InitialSchema().upgrade()

            # fix up any db problems
            main_db.MainDB().SanityCheck()

            if sickrage.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
                sickrage.DEFAULT_PAGE = 'home'

            if not makeDir(sickrage.CACHE_DIR):
                sickrage.LOGGER.error("!!! Creating local cache dir failed")
                sickrage.CACHE_DIR = None

            # Check if we need to perform a restore of the cache folder
            try:
                restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
                if os.path.exists(restore_dir) and os.path.exists(os.path.join(restore_dir, 'cache')):
                    def restore_cache(srcdir, dstdir):
                        def path_leaf(path):
                            head, tail = os.path.split(path)
                            return tail or os.path.basename(head)

                        try:
                            if os.path.isdir(dstdir):
                                bakfilename = '{}-{}'.format(path_leaf(dstdir),
                                                             datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
                                shutil.move(dstdir, os.path.join(os.path.dirname(dstdir), bakfilename))

                            shutil.move(srcdir, dstdir)
                            sickrage.LOGGER.info("Restore: restoring cache successful")
                        except Exception as E:
                            sickrage.LOGGER.error("Restore: restoring cache failed: {}".format(E))

                    restore_cache(os.path.join(restore_dir, 'cache'), sickrage.CACHE_DIR)
            except Exception as e:
                sickrage.LOGGER.error("Restore: restoring cache failed: {}".format(e))
            finally:
                if os.path.exists(os.path.join(sickrage.DATA_DIR, 'restore')):
                    try:
                        removetree(os.path.join(sickrage.DATA_DIR, 'restore'))
                    except Exception as e:
                        sickrage.LOGGER.error("Restore: Unable to remove the restore directory: {}".format(e))

                    for cleanupDir in ['mako', 'sessions', 'indexers']:
                        try:
                            removetree(os.path.join(sickrage.CACHE_DIR, cleanupDir))
                        except Exception as e:
                            sickrage.LOGGER.warning(
                                    "Restore: Unable to remove the cache/{} directory: {1}".format(cleanupDir, e))

            if sickrage.WEB_PORT < 21 or sickrage.WEB_PORT > 65535:
                sickrage.WEB_PORT = 8081

            if not sickrage.WEB_COOKIE_SECRET:
                sickrage.WEB_COOKIE_SECRET = generateCookieSecret()

            # attempt to help prevent users from breaking links by using a bad url
            if not sickrage.ANON_REDIRECT.endswith('?'):
                sickrage.ANON_REDIRECT = ''

            if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', sickrage.ROOT_DIRS):
                sickrage.ROOT_DIRS = ''

            sickrage.NAMING_FORCE_FOLDERS = check_force_season_folders()
            if sickrage.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
                sickrage.NZB_METHOD = 'blackhole'

            if not sickrage.PROVIDER_ORDER:
                sickrage.PROVIDER_ORDER = sickrage.providersDict[GenericProvider.NZB].keys() + \
                                          sickrage.providersDict[GenericProvider.TORRENT].keys()

            if sickrage.TORRENT_METHOD not in (
                    'blackhole', 'utorrent', 'transmission', 'deluge', 'deluged', 'download_station', 'rtorrent',
                    'qbittorrent', 'mlnet'):
                sickrage.TORRENT_METHOD = 'blackhole'

            if sickrage.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
                sickrage.PROPER_SEARCHER_INTERVAL = 'daily'

            if sickrage.AUTOPOSTPROCESSOR_FREQ < sickrage.MIN_AUTOPOSTPROCESSOR_FREQ:
                sickrage.AUTOPOSTPROCESSOR_FREQ = sickrage.MIN_AUTOPOSTPROCESSOR_FREQ

            if sickrage.NAMECACHE_FREQ < sickrage.MIN_NAMECACHE_FREQ:
                sickrage.NAMECACHE_FREQ = sickrage.MIN_NAMECACHE_FREQ

            if sickrage.DAILY_SEARCHER_FREQ < sickrage.MIN_DAILY_SEARCHER_FREQ:
                sickrage.DAILY_SEARCHER_FREQ = sickrage.MIN_DAILY_SEARCHER_FREQ

            sickrage.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
            if sickrage.BACKLOG_SEARCHER_FREQ < sickrage.MIN_BACKLOG_SEARCHER_FREQ:
                sickrage.BACKLOG_SEARCHER_FREQ = sickrage.MIN_BACKLOG_SEARCHER_FREQ

            if sickrage.VERSION_UPDATER_FREQ < sickrage.MIN_VERSION_UPDATER_FREQ:
                sickrage.VERSION_UPDATER_FREQ = sickrage.MIN_VERSION_UPDATER_FREQ

            if sickrage.SHOWUPDATE_HOUR > 23:
                sickrage.SHOWUPDATE_HOUR = 0
            elif sickrage.SHOWUPDATE_HOUR < 0:
                sickrage.SHOWUPDATE_HOUR = 0

            if sickrage.SUBTITLE_SEARCHER_FREQ < sickrage.MIN_SUBTITLE_SEARCHER_FREQ:
                sickrage.SUBTITLE_SEARCHER_FREQ = sickrage.MIN_SUBTITLE_SEARCHER_FREQ

            sickrage.NEWS_LATEST = sickrage.NEWS_LAST_READ

            if sickrage.SUBTITLES_LANGUAGES[0] == '':
                sickrage.SUBTITLES_LANGUAGES = []

            sickrage.TIME_PRESET = sickrage.TIME_PRESET_W_SECONDS.replace(":%S", "")

            # initialize metadata_providers
            sickrage.metadataProvideDict = get_metadata_generator_dict()
            for cur_metadata_tuple in [(sickrage.METADATA_KODI, kodi),
                                       (sickrage.METADATA_KODI_12PLUS, kodi_12plus),
                                       (sickrage.METADATA_MEDIABROWSER, mediabrowser),
                                       (sickrage.METADATA_PS3, ps3),
                                       (sickrage.METADATA_WDTV, wdtv),
                                       (sickrage.METADATA_TIVO, tivo),
                                       (sickrage.METADATA_MEDE8ER, mede8er)]:
                (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
                tmp_provider = cur_metadata_class.metadata_class()
                tmp_provider.set_config(cur_metadata_config)

                sickrage.metadataProvideDict[tmp_provider.name] = tmp_provider

            # init caches
            sickrage.NAMECACHE = nameCache()

            # init queues
            sickrage.SHOWUPDATER = ShowUpdater()
            sickrage.SHOWQUEUE = ShowQueue()
            sickrage.SEARCHQUEUE = SearchQueue()

            # load data for shows from database
            sickrage.showList = load_shows()

            # init searchers
            sickrage.DAILYSEARCHER = DailySearcher()
            sickrage.BACKLOGSEARCHER = BacklogSearcher()
            sickrage.PROPERSEARCHER = ProperSearcher()
            sickrage.TRAKTSEARCHER = TraktSearcher()
            sickrage.SUBTITLESEARCHER = SubtitleSearcher()

            # init scheduler
            sickrage.Scheduler = Scheduler()

            # add version checker job to scheduler
            sickrage.Scheduler.add_job(
                    sickrage.VERSIONUPDATER.run,
                    SRIntervalTrigger(
                        **{'hours': sickrage.VERSION_UPDATER_FREQ, 'min': sickrage.MIN_VERSION_UPDATER_FREQ}),
                    name="VERSIONUPDATER",
                    id="VERSIONUPDATER",
                    replace_existing=True
            )

            # add network timezones updater job to scheduler
            sickrage.Scheduler.add_job(
                    update_network_dict,
                    SRIntervalTrigger(**{'days': 1}),
                    name="TZUPDATER",
                    id="TZUPDATER",
                    replace_existing=True
            )

            # add namecache updater job to scheduler
            sickrage.Scheduler.add_job(
                    sickrage.NAMECACHE.run,
                    SRIntervalTrigger(**{'minutes': sickrage.NAMECACHE_FREQ, 'min': sickrage.MIN_NAMECACHE_FREQ}),
                    name="NAMECACHE",
                    id="NAMECACHE",
                    replace_existing=True
            )

            # add show queue job to scheduler
            sickrage.Scheduler.add_job(
                    sickrage.SHOWQUEUE.run,
                    SRIntervalTrigger(**{'seconds': 3}),
                    name="SHOWQUEUE",
                    id="SHOWQUEUE",
                    replace_existing=True
            )

            # add search queue job to scheduler
            sickrage.Scheduler.add_job(
                    sickrage.SEARCHQUEUE.run,
                    SRIntervalTrigger(**{'seconds': 1}),
                    name="SEARCHQUEUE",
                    id="SEARCHQUEUE",
                    replace_existing=True
            )

            # add show updater job to scheduler
            sickrage.Scheduler.add_job(
                    sickrage.SHOWUPDATER.run,
                    SRIntervalTrigger(
                            **{'hours': 1,
                               'start_date': datetime.datetime.now().replace(hour=sickrage.SHOWUPDATE_HOUR)}),
                    name="SHOWUPDATER",
                    id="SHOWUPDATER",
                    replace_existing=True
            )

            # add daily search job to scheduler
            sickrage.Scheduler.add_job(
                    sickrage.DAILYSEARCHER.run,
                    SRIntervalTrigger(
                            **{'minutes': sickrage.DAILY_SEARCHER_FREQ, 'min': sickrage.MIN_DAILY_SEARCHER_FREQ}),
                    name="DAILYSEARCHER",
                    id="DAILYSEARCHER",
                    replace_existing=True
            )

            # add backlog search job to scheduler
            sickrage.Scheduler.add_job(
                    sickrage.BACKLOGSEARCHER.run,
                    SRIntervalTrigger(
                            **{'minutes': sickrage.BACKLOG_SEARCHER_FREQ,
                               'min': sickrage.MIN_BACKLOG_SEARCHER_FREQ}),
                    name="BACKLOG",
                    id="BACKLOG",
                    replace_existing=True
            )

            # add auto-postprocessing job to scheduler
            job = sickrage.Scheduler.add_job(
                    auto_postprocessor.PostProcessor().run,
                    SRIntervalTrigger(**{'minutes': sickrage.AUTOPOSTPROCESSOR_FREQ,
                                         'min': sickrage.MIN_AUTOPOSTPROCESSOR_FREQ}),
                    name="POSTPROCESSOR",
                    id="POSTPROCESSOR",
                    replace_existing=True
            )
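            # boolean-index into (pause, resume): False calls job.pause(), True calls job.resume()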
            (job.pause, job.resume)[sickrage.PROCESS_AUTOMATICALLY]()

            # add find propers job to scheduler
            job = sickrage.Scheduler.add_job(
                    sickrage.PROPERSEARCHER.run,
                    SRIntervalTrigger(**{
                        'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                            sickrage.PROPER_SEARCHER_INTERVAL]}),
                    name="PROPERSEARCHER",
                    id="PROPERSEARCHER",
                    replace_existing=True
            )
            (job.pause, job.resume)[sickrage.DOWNLOAD_PROPERS]()

            # add trakt.tv checker job to scheduler
            job = sickrage.Scheduler.add_job(
                    sickrage.TRAKTSEARCHER.run,
                    SRIntervalTrigger(**{'hours': 1}),
                    name="TRAKTSEARCHER",
                    id="TRAKTSEARCHER",
                    replace_existing=True,
            )
            (job.pause, job.resume)[sickrage.USE_TRAKT]()

            # add subtitles finder job to scheduler
            job = sickrage.Scheduler.add_job(
                    sickrage.SUBTITLESEARCHER.run,
                    SRIntervalTrigger(**{'hours': sickrage.SUBTITLE_SEARCHER_FREQ}),
                    name="SUBTITLESEARCHER",
                    id="SUBTITLESEARCHER",
                    replace_existing=True
            )
            (job.pause, job.resume)[sickrage.USE_SUBTITLES]()

            # initialize web server
            sickrage.WEB_SERVER = SRWebServer(**{
                'port': int(sickrage.WEB_PORT),
                'host': sickrage.WEB_HOST,
                'data_root': sickrage.DATA_DIR,
                'gui_root': sickrage.GUI_DIR,
                'web_root': sickrage.WEB_ROOT,
                'log_dir': sickrage.WEB_LOG or sickrage.LOG_DIR,
                'username': sickrage.WEB_USERNAME,
                'password': sickrage.WEB_PASSWORD,
                'enable_https': sickrage.ENABLE_HTTPS,
                'handle_reverse_proxy': sickrage.HANDLE_REVERSE_PROXY,
                'https_cert': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_CERT),
                'https_key': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_KEY),
                'daemonize': sickrage.DAEMONIZE,
                'pidfile': sickrage.PIDFILE,
                'stop_timeout': 3,
                'nolaunch': sickrage.WEB_NOLAUNCH
            })

            sickrage.LOGGER.info("SiCKRAGE VERSION:[{}] CONFIG:[{}]".format(sickrage.VERSION, sickrage.CONFIG_FILE))
            sickrage.INITIALIZED = True
            return True
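
Several of these snippets index a two-element tuple with a boolean: False and True behave as the indexes 0 and 1, so ("FAILED", "SUCCESSFUL")[success] picks a word and (job.pause, job.resume)[flag]() picks a bound method and calls it. A minimal self-contained sketch of both uses follows; the Job class here is illustrative, not SickRage's:

success = True
print("Restore: restoring DB and config.ini %s!" % ("FAILED", "SUCCESSFUL")[success])


class Job(object):
    def pause(self):
        print("job paused")

    def resume(self):
        print("job resumed")


job = Job()
process_automatically = False
# False -> index 0 -> pause(), True -> index 1 -> resume()
(job.pause, job.resume)[process_automatically]()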
Example no. 8
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE
        self.srLogger.logFile = self.srConfig.LOG_FILE

        # start logger
        self.srLogger.start()

        # Check available space
        try:
            total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
            if available_space < 100:
                self.srLogger.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                sickrage.restart = False
                return
        except Exception:
            self.srLogger.error('Failed getting diskspace: %s',
                                traceback.format_exc())

        # perform database startup actions
        for db in [MainDB, CacheDB, FailedDB]:
            # initialize the database
            db().initialize()

            # migrate the database
            db().migrate()

            # compact the main database
            db().compact()

        # load data for shows from database
        self.load_shows()

        # build name cache
        self.NAMECACHE.build()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(
                    keepAlive=True,
                    log=lambda msg: self.srLogger.debug(
                        "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME,
                                                  self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # initialize metadata_providers
        for cur_metadata_tuple in [
            (self.srConfig.METADATA_KODI, kodi),
            (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
            (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
            (self.srConfig.METADATA_PS3, ps3),
            (self.srConfig.METADATA_WDTV, wdtv),
            (self.srConfig.METADATA_TIVO, tivo),
            (self.srConfig.METADATA_MEDE8ER, mede8er)
        ]:
            (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
            tmp_provider = cur_metadata_class.metadata_class()
            tmp_provider.set_config(cur_metadata_config)

            self.metadataProviderDict[tmp_provider.name] = tmp_provider

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add namecache updater job
        self.srScheduler.add_job(
            self.NAMECACHE.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.NAMECACHE_FREQ,
                    'min': self.srConfig.MIN_NAMECACHE_FREQ
                }),
            name="NAMECACHE",
            id="NAMECACHE")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': 1,
                    'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ
                }),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start queue's
        self.SEARCHQUEUE.start()
        self.SHOWQUEUE.start()

        # start webserver
        self.srWebServer.start()

        # start ioloop event handler
        IOLoop.current().start()
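
The long run of frequency checks in start() clamps each configured interval to its minimum, and each of those if-blocks is equivalent to a max() call. A small sketch of the same clamp, with illustrative names:

def clamp_to_min(value, minimum):
    # never let a configured frequency drop below its floor
    return max(value, minimum)


assert clamp_to_min(5, 10) == 10   # too low, raised to the minimum
assert clamp_to_min(15, 10) == 15  # already valid, left alone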
Example no. 9
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.logFile = self.srConfig.LOG_FILE
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE

        # start logger
        self.srLogger.start()

        # Check available space
        try:
            total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
            if available_space < 100:
                self.srLogger.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                sickrage.restart = False
                return
        except Exception:
            self.srLogger.error('Failed getting diskspace: %s',
                                traceback.format_exc())

        # perform database startup actions
        for db in [self.mainDB, self.cacheDB, self.failedDB]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.DEVELOPER and self.srConfig.LAST_DB_COMPACT < time.time() - 604800:  # 7 days
            self.mainDB.compact()
            self.srConfig.LAST_DB_COMPACT = int(time.time())

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.CACHE_DIR, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(
                    keepAlive=True,
                    log=lambda msg: self.srLogger.debug(
                        "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME,
                                                  self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = self.BACKLOGSEARCHER.get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'days': 1,
                    'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add show next episode job
        self.srScheduler.add_job(self.SHOWUPDATER.nextEpisode,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="SHOWNEXTEP",
                                 id="SHOWNEXTEP")

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ,
                    'start_date': datetime.datetime.now() + datetime.timedelta(minutes=4)
                }),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
                    'start_date': datetime.datetime.now() + datetime.timedelta(minutes=30)
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start queue's
        self.SEARCHQUEUE.start()
        self.SHOWQUEUE.start()

        # start webserver
        self.srWebServer.start()

        self.srLogger.info("SiCKRAGE :: STARTED")
        self.srLogger.info("SiCKRAGE :: VERSION:[{}]".format(
            self.VERSIONUPDATER.version))
        self.srLogger.info("SiCKRAGE :: CONFIG:[{}] [v{}]".format(
            sickrage.CONFIG_FILE, self.srConfig.CONFIG_VERSION))
        self.srLogger.info("SiCKRAGE :: URL:[{}://{}:{}/]".format(
            ('http', 'https')[self.srConfig.ENABLE_HTTPS],
            self.srConfig.WEB_HOST, self.srConfig.WEB_PORT))

        # launch browser window
        if not sickrage.NOLAUNCH and sickrage.srCore.srConfig.LAUNCH_BROWSER:
            threading.Thread(
                None, lambda: launch_browser(
                    ('http', 'https')[sickrage.srCore.srConfig.ENABLE_HTTPS],
                    self.srConfig.WEB_HOST, sickrage.srCore.srConfig.WEB_PORT)
            ).start()

        # start ioloop event handler
        self.io_loop.start()
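
The PROPERSEARCHER job converts its interval preset to minutes with an inline dict lookup. Pulled out on its own, the mapping looks like the sketch below; the values are copied from the snippet, and the fallback mirrors the validation that resets unknown presets to 'daily':

PROPER_SEARCH_MINUTES = {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}


def proper_search_minutes(preset):
    # unknown presets fall back to daily, matching the config check above
    return PROPER_SEARCH_MINUTES.get(preset, PROPER_SEARCH_MINUTES['daily'])


assert proper_search_minutes('4h') == 240
assert proper_search_minutes('bogus') == 1440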
Example no. 10
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = BackgroundScheduler()
        self.wserver = WebServer()
        self.wsession = WebSession()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()


        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                                 os.path.join(self.data_dir, '{}.bak-{}'
                                              .format('sickrage.db',
                                                      datetime.datetime.now().strftime(
                                                          '%Y%m%d_%H%M%S'))))

            helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                             os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

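        # register 'scgi' with urlparse so scgi:// URLs parse with a netloc, and set urllib's default user agent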
        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting diskspace: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq
            ),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER"
        )

        # add network timezones updater job
        self.scheduler.add_job(
            update_network_dict,
            IntervalTrigger(
                days=1
            ),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name="SHOWUPDATER",
            id="SHOWUPDATER"
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER"
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name="BACKLOG",
            id="BACKLOG"
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR"
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                    self.config.proper_searcher_interval]
            ),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER"
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name="TRAKTSEARCHER",
            id="TRAKTSEARCHER"
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER"
        )

        # start scheduler service
        self.scheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.scheduler.get_job('PROPERSEARCHER').pause,
         self.scheduler.get_job('PROPERSEARCHER').resume
         )[self.config.download_propers]()

        # Pause/Resume TRAKTSEARCHER job
        (self.scheduler.get_job('TRAKTSEARCHER').pause,
         self.scheduler.get_job('TRAKTSEARCHER').resume
         )[self.config.use_trakt]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.scheduler.get_job('SUBTITLESEARCHER').pause,
         self.scheduler.get_job('SUBTITLESEARCHER').resume
         )[self.config.use_subtitles]()

        # Pause/Resume POSTPROCESS job
        (self.scheduler.get_job('POSTPROCESSOR').pause,
         self.scheduler.get_job('POSTPROCESSOR').resume
         )[self.config.process_automatically]()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()
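
Examples 10 and 11 drive APScheduler's BackgroundScheduler directly. The sketch below shows the same add_job / get_job pause-resume pattern in isolation, assuming only the apscheduler package; the tick job is illustrative:

import time

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger


def tick():
    print("job ran")


scheduler = BackgroundScheduler()
scheduler.add_job(tick, IntervalTrigger(seconds=1), name="TICK", id="TICK")
scheduler.start()

# pause/resume by job id, as the examples do for the POSTPROCESSOR job
scheduler.get_job('TICK').pause()
scheduler.get_job('TICK').resume()

time.sleep(3)
scheduler.shutdown()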
Example no. 11
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # event loop policy that allows loop creation on any thread.
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

        # scheduler
        self.scheduler = BackgroundScheduler({'apscheduler.timezone': 'UTC'})

        # init core classes
        self.api = API()
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.announcements = Announcements()

        # authorization sso client
        self.auth_server = AuthServer()

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            self.log.info('Performing restore of backup files')
            success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                # self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
                # self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # init encryption public and private keys
        encryption.initialize()

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # perform integrity check
            self.log.info("Performing integrity check on {} database".format(db.name))
            db.integrity_check()

            # migrate database
            self.log.info("Performing migrations on {} database".format(db.name))
            db.migrate()

            # upgrade database
            self.log.info("Performing upgrades on {} database".format(db.name))
            db.upgrade()

            # cleanup
            self.log.info("Performing cleanup on {} database".format(db.name))
            db.cleanup()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

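        # register 'scgi' as a netloc-style URL scheme and set urllib's default user agent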
        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

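        # root_dirs is stored as "<default index>|<path>|<path>|..."; reset it if malformed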
        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq

        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq

        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq

        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq

        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq

        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age

        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'

        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.task,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
                timezone='utc'
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.task,
            IntervalTrigger(
                days=1,
                timezone='utc'
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.task,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour),
                timezone='utc'
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.task,
            IntervalTrigger(
                minutes=15,
                timezone='utc'
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.task,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.task,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.task,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30),
                timezone='utc'
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.task,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq,
                timezone='utc'
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.task,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval],
                timezone='utc'
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.task,
            IntervalTrigger(
                hours=1,
                timezone='utc'
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.task,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq,
                timezone='utc'
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.task,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime,
                timezone='utc'
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add announcements job
        self.scheduler.add_job(
            self.announcements.task,
            IntervalTrigger(
                minutes=15,
                timezone='utc'
            ),
            name=self.announcements.name,
            id=self.announcements.name
        )

        # add provider URL update job
        self.scheduler.add_job(
            self.search_providers.task,
            IntervalTrigger(
                hours=1,
                timezone='utc'
            ),
            name=self.search_providers.name,
            id=self.search_providers.name
        )

        # start queues
        self.search_queue.start_worker(self.config.max_queue_workers)
        self.show_queue.start_worker(self.config.max_queue_workers)
        self.postprocessor_queue.start_worker(self.config.max_queue_workers)

        # start web server
        self.wserver.start()

        # fire off jobs now
        self.scheduler.get_job(self.version_updater.name).modify(next_run_time=datetime.datetime.utcnow())
        self.scheduler.get_job(self.tz_updater.name).modify(next_run_time=datetime.datetime.utcnow())
        self.scheduler.get_job(self.announcements.name).modify(next_run_time=datetime.datetime.utcnow())
        self.scheduler.get_job(self.search_providers.name).modify(next_run_time=datetime.datetime.utcnow())

        # start scheduler service
        self.scheduler.start()

        # load shows
        self.scheduler.add_job(self.load_shows)

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.scheduler.add_job(launch_browser, args=[('http', 'https')[sickrage.app.config.enable_https],
                                                         sickrage.app.config.web_host, sickrage.app.config.web_port])

        self.log.info("SiCKRAGE :: STARTED")
        self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(sickrage.version()))
        self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(self.config.config_version))
        self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(self.main_db.version))
        self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(self.db_type))
        self.log.info("SiCKRAGE :: URL:[{}://{}:{}/{}]".format(('http', 'https')[self.config.enable_https],
                                                               (self.config.web_host, get_lan_ip())[self.config.web_host == '0.0.0.0'],
                                                               self.config.web_port,
                                                               self.config.web_root))
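
A minimal sketch of the get_free_space() helper used in the disk space check above (the helper itself is not shown in these examples). It assumes sizes are reported in megabytes, as the "Only %sMB left" log message implies; the real SiCKRAGE helper may differ in units and error handling:

import shutil

def get_free_space(path):
    """Return (total, free) disk space for path, in megabytes (a sketch)."""
    usage = shutil.disk_usage(path)  # named tuple of bytes: (total, used, free)
    return usage.total // (1024 * 1024), usage.free // (1024 * 1024)
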
Example no. 12
    async def post(self, *args, **kwargs):
        naming_pattern = self.get_argument('naming_pattern', '')
        naming_multi_ep = self.get_argument('naming_multi_ep', '')
        kodi_data = self.get_argument('kodi_data', '')
        kodi_12plus_data = self.get_argument('kodi_12plus_data', '')
        mediabrowser_data = self.get_argument('mediabrowser_data', '')
        sony_ps3_data = self.get_argument('sony_ps3_data', '')
        wdtv_data = self.get_argument('wdtv_data', '')
        tivo_data = self.get_argument('tivo_data', '')
        mede8er_data = self.get_argument('mede8er_data', '')
        keep_processed_dir = self.get_argument('keep_processed_dir', '')
        process_method = self.get_argument('process_method', '')
        del_rar_contents = self.get_argument('del_rar_contents', '')
        process_automatically = self.get_argument('process_automatically', '')
        no_delete = self.get_argument('no_delete', '')
        rename_episodes = self.get_argument('rename_episodes', '')
        airdate_episodes = self.get_argument('airdate_episodes', '')
        file_timestamp_timezone = self.get_argument('file_timestamp_timezone', '')
        unpack = self.get_argument('unpack', '')
        move_associated_files = self.get_argument('move_associated_files', '')
        sync_files = self.get_argument('sync_files', '')
        postpone_if_sync_files = self.get_argument('postpone_if_sync_files', '')
        nfo_rename = self.get_argument('nfo_rename', '')
        tv_download_dir = self.get_argument('tv_download_dir', '')
        naming_custom_abd = self.get_argument('naming_custom_abd', '')
        naming_anime = self.get_argument('naming_anime', '')
        create_missing_show_dirs = self.get_argument('create_missing_show_dirs', '')
        add_shows_wo_dir = self.get_argument('add_shows_wo_dir', '')
        naming_abd_pattern = self.get_argument('naming_abd_pattern', '')
        naming_strip_year = self.get_argument('naming_strip_year', '')
        delete_failed = self.get_argument('delete_failed', '')
        extra_scripts = self.get_argument('extra_scripts', '')
        naming_custom_sports = self.get_argument('naming_custom_sports', '')
        naming_sports_pattern = self.get_argument('naming_sports_pattern', '')
        naming_custom_anime = self.get_argument('naming_custom_anime', '')
        naming_anime_pattern = self.get_argument('naming_anime_pattern', '')
        naming_anime_multi_ep = self.get_argument('naming_anime_multi_ep', '')
        autopostprocessor_frequency = self.get_argument('autopostprocessor_frequency', '')
        delete_non_associated_files = self.get_argument('delete_non_associated_files', '')
        allowed_extensions = self.get_argument('allowed_extensions', '')
        processor_follow_symlinks = self.get_argument('processor_follow_symlinks', '')
        unpack_dir = self.get_argument('unpack_dir', '')

        results = []

        if not sickrage.app.config.change_tv_download_dir(tv_download_dir):
            results += [_("Unable to create directory ") + os.path.normpath(tv_download_dir) + _(", dir not changed.")]

        sickrage.app.config.change_autopostprocessor_freq(autopostprocessor_frequency)
        sickrage.app.config.process_automatically = checkbox_to_value(process_automatically)

        if unpack:
            if is_rar_supported() != "not supported":
                sickrage.app.config.unpack = checkbox_to_value(unpack)
                sickrage.app.config.unpack_dir = unpack_dir
            else:
                sickrage.app.config.unpack = 0
                results.append(_("Unpacking Not Supported, disabling unpack setting"))
        else:
            sickrage.app.config.unpack = checkbox_to_value(unpack)

        sickrage.app.config.no_delete = checkbox_to_value(no_delete)
        sickrage.app.config.keep_processed_dir = checkbox_to_value(keep_processed_dir)
        sickrage.app.config.create_missing_show_dirs = checkbox_to_value(create_missing_show_dirs)
        sickrage.app.config.add_shows_wo_dir = checkbox_to_value(add_shows_wo_dir)
        sickrage.app.config.process_method = process_method
        sickrage.app.config.delrarcontents = checkbox_to_value(del_rar_contents)
        sickrage.app.config.extra_scripts = [x.strip() for x in extra_scripts.split('|') if x.strip()]
        sickrage.app.config.rename_episodes = checkbox_to_value(rename_episodes)
        sickrage.app.config.airdate_episodes = checkbox_to_value(airdate_episodes)
        sickrage.app.config.file_timestamp_timezone = file_timestamp_timezone
        sickrage.app.config.move_associated_files = checkbox_to_value(move_associated_files)
        sickrage.app.config.sync_files = sync_files
        sickrage.app.config.postpone_if_sync_files = checkbox_to_value(postpone_if_sync_files)
        sickrage.app.config.allowed_extensions = ','.join({x.strip() for x in allowed_extensions.split(',') if x.strip()})
        sickrage.app.config.naming_custom_abd = checkbox_to_value(naming_custom_abd)
        sickrage.app.config.naming_custom_sports = checkbox_to_value(naming_custom_sports)
        sickrage.app.config.naming_custom_anime = checkbox_to_value(naming_custom_anime)
        sickrage.app.config.naming_strip_year = checkbox_to_value(naming_strip_year)
        sickrage.app.config.delete_failed = checkbox_to_value(delete_failed)
        sickrage.app.config.nfo_rename = checkbox_to_value(nfo_rename)
        sickrage.app.config.delete_non_associated_files = checkbox_to_value(delete_non_associated_files)
        sickrage.app.config.processor_follow_symlinks = checkbox_to_value(processor_follow_symlinks)

        if is_naming_pattern_valid(pattern=naming_pattern, multi=naming_multi_ep) != "invalid":
            sickrage.app.config.naming_pattern = naming_pattern
            sickrage.app.config.naming_multi_ep = int(naming_multi_ep)
            sickrage.app.config.naming_force_folders = validator.check_force_season_folders()
        else:
            results.append(_("You tried saving an invalid naming config, not saving your naming settings"))

        if is_naming_pattern_valid(pattern=naming_anime_pattern, multi=naming_anime_multi_ep,
                                   anime_type=naming_anime) != "invalid":
            sickrage.app.config.naming_anime_pattern = naming_anime_pattern
            sickrage.app.config.naming_anime_multi_ep = int(naming_anime_multi_ep)
            sickrage.app.config.naming_anime = int(naming_anime)
        else:
            results.append(_("You tried saving an invalid anime naming config, not saving your naming settings"))

        if is_naming_pattern_valid(pattern=naming_abd_pattern, abd=True) != "invalid":
            sickrage.app.config.naming_abd_pattern = naming_abd_pattern
        else:
            results.append(_("You tried saving an invalid air-by-date naming config, not saving your air-by-date settings"))

        if is_naming_pattern_valid(pattern=naming_sports_pattern, multi=naming_multi_ep, sports=True) != "invalid":
            sickrage.app.config.naming_sports_pattern = naming_sports_pattern
        else:
            results.append(_("You tried saving an invalid sports naming config, not saving your sports settings"))

        sickrage.app.metadata_providers['kodi'].config = kodi_data
        sickrage.app.metadata_providers['kodi_12plus'].config = kodi_12plus_data
        sickrage.app.metadata_providers['mediabrowser'].config = mediabrowser_data
        sickrage.app.metadata_providers['sony_ps3'].config = sony_ps3_data
        sickrage.app.metadata_providers['wdtv'].config = wdtv_data
        sickrage.app.metadata_providers['tivo'].config = tivo_data
        sickrage.app.metadata_providers['mede8er'].config = mede8er_data

        sickrage.app.config.save()

        if results:
            for x in results:
                sickrage.app.log.warning(x)
            sickrage.app.alerts.error(_('Error(s) Saving Configuration'), '<br>\n'.join(results))
        else:
            sickrage.app.alerts.message(_('[POST-PROCESSING] Configuration Encrypted and Saved to disk'))

        return self.redirect("/config/postProcessing/")
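
The handler above leans heavily on checkbox_to_value() to coerce HTML checkbox submissions into config flags. A plausible minimal sketch, assuming the usual truthy form strings map to 1 and everything else to 0; the real helper may accept other spellings or input types:

def checkbox_to_value(value, value_on=1, value_off=0):
    """Turn an HTML checkbox submission into value_on/value_off (a sketch)."""
    if isinstance(value, list):  # repeated form fields can arrive as a list
        value = value[-1]
    if str(value).strip().lower() in ('on', 'true', '1', 'yes'):
        return value_on
    return value_off
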
Example no. 13
    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: {}!".format(("FAILED", "SUCCESSFUL")[success]))
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'.format(
                                      'sickrage.db', datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(
                days=1,
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(
                minutes=15,
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(
                days=1,
            ),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(('http', 'https')[sickrage.app.config.enable_https],
                                                               sickrage.app.config.web_host,
                                                               sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()
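
Every job registration above follows the same APScheduler shape: one callable, one IntervalTrigger, and the task name reused as both the job's display name and its unique id, which is what lets other startup code look a job up with scheduler.get_job(name) and fire it immediately via modify(next_run_time=...). A stripped-down, self-contained illustration of that pattern (using the plain BackgroundScheduler so it runs outside Tornado):

import datetime

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger

def task():
    print('tick', datetime.datetime.utcnow())

scheduler = BackgroundScheduler()

# same registration shape as the jobs above: the name doubles as the id
scheduler.add_job(task, IntervalTrigger(minutes=15), name='EXAMPLE', id='EXAMPLE')
scheduler.start()

# fire the job right away, as the startup code does for its updaters
scheduler.get_job('EXAMPLE').modify(next_run_time=datetime.datetime.utcnow())
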
Example no. 14
def initialize():
    if not sickrage.INITIALIZED:
        with threading.Lock():
            # init encoding
            encodingInit()

            # Check if we need to perform a restore first
            os.chdir(sickrage.DATA_DIR)
            restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
            if os.path.exists(restore_dir):
                success = restoreDB(restore_dir, sickrage.DATA_DIR)
                sickrage.LOGGER.info(
                    "Restore: restoring DB and config.ini %s!\n" %
                    ("FAILED", "SUCCESSFUL")[success])

            # init indexerApi
            sickrage.INDEXER_API = indexerApi

            # initialize notifiers
            sickrage.NOTIFIERS = AttrDict(
                libnotify=LibnotifyNotifier(),
                kodi_notifier=KODINotifier(),
                plex_notifier=PLEXNotifier(),
                emby_notifier=EMBYNotifier(),
                nmj_notifier=NMJNotifier(),
                nmjv2_notifier=NMJv2Notifier(),
                synoindex_notifier=synoIndexNotifier(),
                synology_notifier=synologyNotifier(),
                pytivo_notifier=pyTivoNotifier(),
                growl_notifier=GrowlNotifier(),
                prowl_notifier=ProwlNotifier(),
                libnotify_notifier=LibnotifyNotifier(),
                pushover_notifier=PushoverNotifier(),
                boxcar_notifier=BoxcarNotifier(),
                boxcar2_notifier=Boxcar2Notifier(),
                nma_notifier=NMA_Notifier(),
                pushalot_notifier=PushalotNotifier(),
                pushbullet_notifier=PushbulletNotifier(),
                freemobile_notifier=FreeMobileNotifier(),
                twitter_notifier=TwitterNotifier(),
                trakt_notifier=TraktNotifier(),
                email_notifier=EmailNotifier(),
            )

            sickrage.NAMING_EP_TYPE = (
                "%(seasonnumber)dx%(episodenumber)02d",
                "s%(seasonnumber)02de%(episodenumber)02d",
                "S%(seasonnumber)02dE%(episodenumber)02d",
                "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.SPORTS_EP_TYPE = (
                "%(seasonnumber)dx%(episodenumber)02d",
                "s%(seasonnumber)02de%(episodenumber)02d",
                "S%(seasonnumber)02dE%(episodenumber)02d",
                "%(seasonnumber)02dx%(episodenumber)02d")

            sickrage.NAMING_EP_TYPE_TEXT = ("1x02", "s01e02", "S01E02",
                                            "01x02")
            sickrage.NAMING_MULTI_EP_TYPE = {
                0: ["-%(episodenumber)02d"] * len(sickrage.NAMING_EP_TYPE),
                1: [" - " + x for x in sickrage.NAMING_EP_TYPE],
                2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
            }

            sickrage.NAMING_MULTI_EP_TYPE_TEXT = ("extend", "duplicate",
                                                  "repeat")
            sickrage.NAMING_SEP_TYPE = (" - ", " ")
            sickrage.NAMING_SEP_TYPE_TEXT = (" - ", "space")

            # migrate old database filenames to new ones
            if not os.path.exists(main_db.MainDB().filename) and os.path.exists("sickbeard.db"):
                helpers.moveFile("sickbeard.db", main_db.MainDB().filename)

            # init config file
            srConfig.load_config(sickrage.CONFIG_FILE, True)

            # set socket timeout
            socket.setdefaulttimeout(sickrage.SOCKET_TIMEOUT)

            # init logger
            sickrage.LOGGER = sickrage.LOGGER.__class__(
                logFile=sickrage.LOG_FILE,
                logSize=sickrage.LOG_SIZE,
                logNr=sickrage.LOG_NR,
                fileLogging=makeDir(sickrage.LOG_DIR),
                debugLogging=sickrage.DEBUG)

            # init updater and get current version
            sickrage.VERSIONUPDATER = VersionUpdater()
            sickrage.VERSION = sickrage.VERSIONUPDATER.updater.get_cur_version

            # initialize the main SB database
            main_db.MainDB().InitialSchema().upgrade()

            # initialize the cache database
            cache_db.CacheDB().InitialSchema().upgrade()

            # initialize the failed downloads database
            failed_db.FailedDB().InitialSchema().upgrade()

            # fix up any db problems
            main_db.MainDB().SanityCheck()

            if sickrage.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                             'news', 'IRC'):
                sickrage.DEFAULT_PAGE = 'home'

            if not makeDir(sickrage.CACHE_DIR):
                sickrage.LOGGER.error("!!! Creating local cache dir failed")
                sickrage.CACHE_DIR = None

            # Check if we need to perform a restore of the cache folder
            try:
                restore_dir = os.path.join(sickrage.DATA_DIR, 'restore')
                if os.path.exists(restore_dir) and os.path.exists(
                        os.path.join(restore_dir, 'cache')):

                    def restore_cache(srcdir, dstdir):
                        def path_leaf(path):
                            head, tail = os.path.split(path)
                            return tail or os.path.basename(head)

                        try:
                            if os.path.isdir(dstdir):
                                bakfilename = '{}-{}'.format(
                                    path_leaf(dstdir),
                                    datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
                                shutil.move(dstdir, os.path.join(os.path.dirname(dstdir), bakfilename))

                            shutil.move(srcdir, dstdir)
                            sickrage.LOGGER.info(
                                "Restore: restoring cache successful")
                        except Exception as E:
                            sickrage.LOGGER.error(
                                "Restore: restoring cache failed: {}".format(
                                    E))

                    restore_cache(os.path.join(restore_dir, 'cache'),
                                  sickrage.CACHE_DIR)
            except Exception as e:
                sickrage.LOGGER.error(
                    "Restore: restoring cache failed: {}".format(e))
            finally:
                if os.path.exists(os.path.join(sickrage.DATA_DIR, 'restore')):
                    try:
                        removetree(os.path.join(sickrage.DATA_DIR, 'restore'))
                    except Exception as e:
                        sickrage.LOGGER.error(
                            "Restore: Unable to remove the restore directory: {}"
                            .format(e))

                    for cleanupDir in ['mako', 'sessions', 'indexers']:
                        try:
                            removetree(
                                os.path.join(sickrage.CACHE_DIR, cleanupDir))
                        except Exception as e:
                            sickrage.LOGGER.warning(
                                "Restore: Unable to remove the cache/{} directory: {1}"
                                .format(cleanupDir, e))

            if sickrage.WEB_PORT < 21 or sickrage.WEB_PORT > 65535:
                sickrage.WEB_PORT = 8081

            if not sickrage.WEB_COOKIE_SECRET:
                sickrage.WEB_COOKIE_SECRET = generateCookieSecret()

            # attempt to help prevent users from breaking links by using a bad url
            if not sickrage.ANON_REDIRECT.endswith('?'):
                sickrage.ANON_REDIRECT = ''

            if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', sickrage.ROOT_DIRS):
                sickrage.ROOT_DIRS = ''

            sickrage.NAMING_FORCE_FOLDERS = check_force_season_folders()
            if sickrage.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
                sickrage.NZB_METHOD = 'blackhole'

            if not sickrage.PROVIDER_ORDER:
                sickrage.PROVIDER_ORDER = list(sickrage.providersDict[GenericProvider.NZB]) + \
                                          list(sickrage.providersDict[GenericProvider.TORRENT])

            if sickrage.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                               'transmission', 'deluge',
                                               'deluged', 'download_station',
                                               'rtorrent', 'qbittorrent',
                                               'mlnet'):
                sickrage.TORRENT_METHOD = 'blackhole'

            if sickrage.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                         '4h', 'daily'):
                sickrage.PROPER_SEARCHER_INTERVAL = 'daily'

            if sickrage.AUTOPOSTPROCESSOR_FREQ < sickrage.MIN_AUTOPOSTPROCESSOR_FREQ:
                sickrage.AUTOPOSTPROCESSOR_FREQ = sickrage.MIN_AUTOPOSTPROCESSOR_FREQ

            if sickrage.NAMECACHE_FREQ < sickrage.MIN_NAMECACHE_FREQ:
                sickrage.NAMECACHE_FREQ = sickrage.MIN_NAMECACHE_FREQ

            if sickrage.DAILY_SEARCHER_FREQ < sickrage.MIN_DAILY_SEARCHER_FREQ:
                sickrage.DAILY_SEARCHER_FREQ = sickrage.MIN_DAILY_SEARCHER_FREQ

            sickrage.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
            if sickrage.BACKLOG_SEARCHER_FREQ < sickrage.MIN_BACKLOG_SEARCHER_FREQ:
                sickrage.BACKLOG_SEARCHER_FREQ = sickrage.MIN_BACKLOG_SEARCHER_FREQ

            if sickrage.VERSION_UPDATER_FREQ < sickrage.MIN_VERSION_UPDATER_FREQ:
                sickrage.VERSION_UPDATER_FREQ = sickrage.MIN_VERSION_UPDATER_FREQ

            if sickrage.SHOWUPDATE_HOUR < 0 or sickrage.SHOWUPDATE_HOUR > 23:
                sickrage.SHOWUPDATE_HOUR = 0

            if sickrage.SUBTITLE_SEARCHER_FREQ < sickrage.MIN_SUBTITLE_SEARCHER_FREQ:
                sickrage.SUBTITLE_SEARCHER_FREQ = sickrage.MIN_SUBTITLE_SEARCHER_FREQ

            sickrage.NEWS_LATEST = sickrage.NEWS_LAST_READ

            if sickrage.SUBTITLES_LANGUAGES[0] == '':
                sickrage.SUBTITLES_LANGUAGES = []

            sickrage.TIME_PRESET = sickrage.TIME_PRESET_W_SECONDS.replace(":%S", "")

            # initialize metadata_providers
            sickrage.metadataProvideDict = get_metadata_generator_dict()
            for cur_metadata_tuple in [(sickrage.METADATA_KODI, kodi),
                                       (sickrage.METADATA_KODI_12PLUS, kodi_12plus),
                                       (sickrage.METADATA_MEDIABROWSER, mediabrowser),
                                       (sickrage.METADATA_PS3, ps3),
                                       (sickrage.METADATA_WDTV, wdtv),
                                       (sickrage.METADATA_TIVO, tivo),
                                       (sickrage.METADATA_MEDE8ER, mede8er)]:
                (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
                tmp_provider = cur_metadata_class.metadata_class()
                tmp_provider.set_config(cur_metadata_config)

                sickrage.metadataProvideDict[tmp_provider.name] = tmp_provider

            # init caches
            sickrage.NAMECACHE = nameCache()

            # init queues
            sickrage.SHOWUPDATER = ShowUpdater()
            sickrage.SHOWQUEUE = ShowQueue()
            sickrage.SEARCHQUEUE = SearchQueue()

            # load data for shows from database
            sickrage.showList = load_shows()

            # init searchers
            sickrage.DAILYSEARCHER = DailySearcher()
            sickrage.BACKLOGSEARCHER = BacklogSearcher()
            sickrage.PROPERSEARCHER = ProperSearcher()
            sickrage.TRAKTSEARCHER = TraktSearcher()
            sickrage.SUBTITLESEARCHER = SubtitleSearcher()

            # init scheduler
            sickrage.Scheduler = Scheduler()

            # add version checker job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.VERSIONUPDATER.run,
                SRIntervalTrigger(
                    **{
                        'hours': sickrage.VERSION_UPDATER_FREQ,
                        'min': sickrage.MIN_VERSION_UPDATER_FREQ
                    }),
                name="VERSIONUPDATER",
                id="VERSIONUPDATER",
                replace_existing=True)

            # add network timezones updater job to scheduler
            sickrage.Scheduler.add_job(update_network_dict,
                                       SRIntervalTrigger(**{'days': 1}),
                                       name="TZUPDATER",
                                       id="TZUPDATER",
                                       replace_existing=True)

            # add namecache updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.NAMECACHE.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.NAMECACHE_FREQ,
                        'min': sickrage.MIN_NAMECACHE_FREQ
                    }),
                name="NAMECACHE",
                id="NAMECACHE",
                replace_existing=True)

            # add show queue job to scheduler
            sickrage.Scheduler.add_job(sickrage.SHOWQUEUE.run,
                                       SRIntervalTrigger(**{'seconds': 3}),
                                       name="SHOWQUEUE",
                                       id="SHOWQUEUE",
                                       replace_existing=True)

            # add search queue job to scheduler
            sickrage.Scheduler.add_job(sickrage.SEARCHQUEUE.run,
                                       SRIntervalTrigger(**{'seconds': 1}),
                                       name="SEARCHQUEUE",
                                       id="SEARCHQUEUE",
                                       replace_existing=True)

            # add show updater job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.SHOWUPDATER.run,
                SRIntervalTrigger(**{
                    'hours': 1,
                    'start_date': datetime.datetime.now().replace(hour=sickrage.SHOWUPDATE_HOUR)
                }),
                name="SHOWUPDATER",
                id="SHOWUPDATER",
                replace_existing=True)

            # add daily search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.DAILYSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.DAILY_SEARCHER_FREQ,
                        'min': sickrage.MIN_DAILY_SEARCHER_FREQ
                    }),
                name="DAILYSEARCHER",
                id="DAILYSEARCHER",
                replace_existing=True)

            # add backlog search job to scheduler
            sickrage.Scheduler.add_job(
                sickrage.BACKLOGSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.BACKLOG_SEARCHER_FREQ,
                        'min': sickrage.MIN_BACKLOG_SEARCHER_FREQ
                    }),
                name="BACKLOG",
                id="BACKLOG",
                replace_existing=True)

            # add auto-postprocessing job to scheduler
            job = sickrage.Scheduler.add_job(
                auto_postprocessor.PostProcessor().run,
                SRIntervalTrigger(
                    **{
                        'minutes': sickrage.AUTOPOSTPROCESSOR_FREQ,
                        'min': sickrage.MIN_AUTOPOSTPROCESSOR_FREQ
                    }),
                name="POSTPROCESSOR",
                id="POSTPROCESSOR",
                replace_existing=True)
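            # index into (pause, resume) with the config flag: resume the job when enabled, pause it when disabled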
            (job.pause, job.resume)[sickrage.PROCESS_AUTOMATICALLY]()

            # add find propers job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.PROPERSEARCHER.run,
                SRIntervalTrigger(
                    **{
                        'minutes': {
                            '15m': 15,
                            '45m': 45,
                            '90m': 90,
                            '4h': 4 * 60,
                            'daily': 24 * 60
                        }[sickrage.PROPER_SEARCHER_INTERVAL]
                    }),
                name="PROPERSEARCHER",
                id="PROPERSEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.DOWNLOAD_PROPERS]()

            # add trakt.tv checker job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.TRAKTSEARCHER.run,
                SRIntervalTrigger(**{'hours': 1}),
                name="TRAKTSEARCHER",
                id="TRAKTSEARCHER",
                replace_existing=True,
            )
            (job.pause, job.resume)[sickrage.USE_TRAKT]()

            # add subtitles finder job to scheduler
            job = sickrage.Scheduler.add_job(
                sickrage.SUBTITLESEARCHER.run,
                SRIntervalTrigger(
                    **{'hours': sickrage.SUBTITLE_SEARCHER_FREQ}),
                name="SUBTITLESEARCHER",
                id="SUBTITLESEARCHER",
                replace_existing=True)
            (job.pause, job.resume)[sickrage.USE_SUBTITLES]()

            # initialize web server
            sickrage.WEB_SERVER = SRWebServer(**{
                'port': int(sickrage.WEB_PORT),
                'host': sickrage.WEB_HOST,
                'data_root': sickrage.DATA_DIR,
                'gui_root': sickrage.GUI_DIR,
                'web_root': sickrage.WEB_ROOT,
                'log_dir': sickrage.WEB_LOG or sickrage.LOG_DIR,
                'username': sickrage.WEB_USERNAME,
                'password': sickrage.WEB_PASSWORD,
                'enable_https': sickrage.ENABLE_HTTPS,
                'handle_reverse_proxy': sickrage.HANDLE_REVERSE_PROXY,
                'https_cert': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_CERT),
                'https_key': os.path.join(sickrage.ROOT_DIR, sickrage.HTTPS_KEY),
                'daemonize': sickrage.DAEMONIZE,
                'pidfile': sickrage.PIDFILE,
                'stop_timeout': 3,
                'nolaunch': sickrage.WEB_NOLAUNCH
            })

            sickrage.LOGGER.info("SiCKRAGE VERSION:[{}] CONFIG:[{}]".format(
                sickrage.VERSION, sickrage.CONFIG_FILE))
            sickrage.INITIALIZED = True
            return True
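
SRIntervalTrigger itself is not shown in these examples, but the call sites above pass the usual IntervalTrigger keywords plus a 'min' floor (e.g. 'hours': VERSION_UPDATER_FREQ with 'min': MIN_VERSION_UPDATER_FREQ). A hedged sketch of how such a wrapper could clamp the interval, assuming it subclasses APScheduler's IntervalTrigger; the real class may behave differently:

from apscheduler.triggers.interval import IntervalTrigger

class SRIntervalTrigger(IntervalTrigger):
    """IntervalTrigger with a lower bound on the active unit (a sketch
    inferred from call sites such as {'hours': freq, 'min': min_freq})."""

    def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0,
                 min=0, start_date=None, end_date=None, timezone=None):
        # clamp whichever unit is in use against the configured floor
        if hours:
            hours = max(hours, min)
        elif minutes:
            minutes = max(minutes, min)
        elif seconds:
            seconds = max(seconds, min)
        super(SRIntervalTrigger, self).__init__(
            weeks=weeks, days=days, hours=hours, minutes=minutes,
            seconds=seconds, start_date=start_date, end_date=end_date,
            timezone=timezone)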