Example #1
def set_up_anidb_connection():
    """Connect to anidb."""
    if not app.USE_ANIDB:
        log.debug(u'Usage of anidb disabled. Skipping')
        return False

    if not app.ANIDB_USERNAME and not app.ANIDB_PASSWORD:
        log.debug(u'anidb username and/or password are not set.'
                  u' Aborting anidb lookup.')
        return False

    if not app.ADBA_CONNECTION:
        try:
            app.ADBA_CONNECTION = adba.Connection(keepAlive=True)
        except Exception as error:
            log.warning(u'anidb exception msg: {0!r}', error)
            return False

    try:
        if not app.ADBA_CONNECTION.authed():
            app.ADBA_CONNECTION.auth(app.ANIDB_USERNAME, app.ANIDB_PASSWORD)
        else:
            return True
    except Exception as error:
        log.warning(u'anidb exception msg: {0!r}', error)
        return False

    return app.ADBA_CONNECTION.authed()
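For context, a minimal sketch of how a helper like this is typically called, assuming the app/adba objects from this example; the lookup body is only a placeholder, and the logout() call mirrors the session cleanup shown in Example #7.

# Hypothetical caller (not part of the example above): run lookups through the
# shared connection, then release the AniDB UDP session.
if set_up_anidb_connection():
    try:
        pass  # perform AniDB lookups via app.ADBA_CONNECTION here
    finally:
        app.ADBA_CONNECTION.logout()  # see Example #7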
Example #2
def set_up_anidb_connection():
    """Connect to anidb."""
    if not sickrage.app.config.use_anidb:
        sickrage.app.log.debug('Usage of AniDB disabled. Skipping')
        return False

    if not sickrage.app.config.anidb_username and not sickrage.app.config.anidb_password:
        sickrage.app.log.debug(
            'AniDB username and/or password are not set. Aborting anidb lookup.'
        )
        return False

    if not sickrage.app.adba_connection:
        try:
            sickrage.app.adba_connection = adba.Connection(keepAlive=True)
        except Exception as error:
            sickrage.app.log.warning(
                'AniDB exception msg: {0!r}'.format(error))
            return False

    try:
        if not sickrage.app.adba_connection.authed():
            sickrage.app.adba_connection.auth(
                sickrage.app.config.anidb_username,
                sickrage.app.config.anidb_password)
        else:
            return True
    except Exception as error:
        sickrage.app.log.warning('AniDB exception msg: {0!r}'.format(error))
        return False

    return sickrage.app.adba_connection.authed()
Example #3
def set_up_anidb_connection():
    if not sickbeard.USE_ANIDB:
        logger.log(u'Usage of anidb disabled. Skipping', logger.DEBUG)
        return False

    if not sickbeard.ANIDB_USERNAME and not sickbeard.ANIDB_PASSWORD:
        logger.log(
            u'anidb username and/or password are not set. Aborting anidb lookup.',
            logger.DEBUG)
        return False

    if not sickbeard.ADBA_CONNECTION:
        # anidb_logger = (lambda x: logger.log('ANIDB: ' + str(x)), logger.DEBUG)
        sickbeard.ADBA_CONNECTION = adba.Connection(
            keepAlive=True)  # , log=anidb_logger)

    auth = False
    try:
        auth = sickbeard.ADBA_CONNECTION.authed()
    except (BaseException, Exception) as e:
        logger.log(u'exception msg: ' + ex(e))

    if not auth:
        try:
            sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME,
                                           sickbeard.ANIDB_PASSWORD)
        except (BaseException, Exception) as e:
            logger.log(u'exception msg: ' + ex(e))
            return False
    else:
        return True

    return sickbeard.ADBA_CONNECTION.authed()
Example #4
    def connect(self):

        global CONNECTION
        global LAST_ACCESS

        try:
            username = Prefs["username"]
            password = Prefs["password"]

            if CONNECTION is not None:
                if not CONNECTION.authed():
                    CONNECTION.auth(username, password)

                Log("Reusing authenticated connection")
                LAST_ACCESS = datetime.now()
                return CONNECTION

            CONNECTION = adba.Connection(log=True)

            Thread.CreateTimer(300, checkConnection)

            if not username or not password:
                Log("Set username and password!")
                return None

            CONNECTION.auth(username, password)
            Log("Auth ok!")

        except Exception as e:
            Log("Connection exception, msg: " + str(e))
            raise e

        LAST_ACCESS = datetime.now()
        return CONNECTION
Example #5
def set_up_anidb_connection():
    if not sickbeard.USE_ANIDB:
        logger.log(u"Usage of anidb disabled. Skiping", logger.DEBUG)
        return False

    if not sickbeard.ANIDB_USERNAME and not sickbeard.ANIDB_PASSWORD:
        logger.log(u"anidb username and/or password are not set. Aborting anidb lookup.", logger.DEBUG)
        return False

    if not sickbeard.ADBA_CONNECTION:
        anidb_logger = lambda x: logger.log("ANIDB: " + str(x), logger.DEBUG)
        sickbeard.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=anidb_logger)

    if not sickbeard.ADBA_CONNECTION.authed():
        try:
            sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME, sickbeard.ANIDB_PASSWORD)
        except Exception as e:
            logger.log(u"exception msg: " + str(e))
            return False

    return sickbeard.ADBA_CONNECTION.authed()
Example #6
    def connect(self):
        "Create an API session and authenticate with the stored credentials."

        global CONNECTION
        global LAST_ACCESS

        try:
            username = Prefs["username"]
            password = Prefs["password"]

            if CONNECTION is not None:
                if not CONNECTION.authed():
                    CONNECTION.auth(username, password)

                Log("Reusing authenticated connection")
                LAST_ACCESS = datetime.now()
                return CONNECTION

            CONNECTION = adba.Connection(log=True, keepAlive=True)

            Thread.CreateTimer(300, checkConnection)

            if not username or not password:
                Log("Set username and password!")
                return None

            CONNECTION.auth(username, password)
            Log("Auth ok!")

        except Exception:
            Log("Connection exception, traceback:")
            Log("".join(
                traceback.format_exception(sys.exc_type, sys.exc_value,
                                           sys.exc_traceback)))
            raise Exception("See INFO-level message above for traceback")

        LAST_ACCESS = datetime.now()
        return CONNECTION
Example #7
                        default=[],
                        help='All files and/or folders to be processed.')

fileArgs = fileParser.parse_args(otherArgs)

allFiles = args.files + fileArgs.files

# Start logging
FileListener = adba.StartLogging()

if args.out_file:
    sys.stdout = open(args.out_file, 'w', encoding='UTF-8')

# Issue logout of session
if args.command == 'logout':
    connection = adba.Connection(commandDelay=2.1)
    connection.logout()
    sys.exit(0)

# Check if fields are required
if args.command in ['mylistaddwithfields', 'getfields']:
    if not args.fields:
        print("Fields to retrieve are required for " + args.command + ".")
        sys.exit(0)

# Convert state to UDP
args.state = stateToUDP[args.state]

# Convert watched to UDP
if args.watched:
    viewed = viewedToUDP['watched']
Example #8
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id='sickrage-app',
            client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:

            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username,
                                          self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name=self.version_updater.name,
            id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # start scheduler service
        self.scheduler.start()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

        # start ioloop
        self.io_loop.start()
Example #9
import os
import getopt
from test_lib import *
####################################################
# here starts the stuff that is interesting for you
####################################################

# you only need to import the module
import adba

# let's see the version
print(adba.version)

# make a connection object
# log=True is great for testing, not so great for a running system (default is False)
connection = adba.Connection(log=True)

# we can always ping to see if we can reach the server
try:
    connection.ping()
except Exception as e:
    print("exception msg: " + str(e))
    print("if we can't even ping, stop right here")
    exit()

# ok, let's try to authenticate. we need username and pw for that
try:
    connection.auth(user, pw)
except Exception as e:
    print("exception msg: " + str(e))
Example #10
# along with aDBa.  If not, see <http://www.gnu.org/licenses/>.

from test_lib import *
####################################################
# here starts the stuff that is interesting for you
####################################################

# you only need to import the module
import adba

# let's see the version
print(adba.version)

# make a connection object
# log=True is great for testing, not so great for a running system (default is False)
connection = adba.Connection()

# we can always ping to see if we can reach the server
try:
    connection.ping()
except Exception as e:
    print("exception msg: " + str(e))
    print("if we can't even ping, stop right here")
    exit()

# ok, let's try to authenticate. we need username and pw for that
try:
    connection.auth(user, pw)
except Exception as e:
    print("exception msg: " + str(e))
Example #11
# This file is part of aDBa.
#
# aDBa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aDBa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aDBa.  If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import getopt
from test_lib import *
import threading

####################################################
# here starts the stuff that is interesting for you
####################################################

# you only need to import the module
import adba
connection = adba.Connection(log=logwrapper)
connection.auth(user, pw)
anime = adba.Anime(connection, name="Bleach", load=True)
anime.add_notification()
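Combining the calls shown across this listing (ping from Examples #9/#10, auth and Anime from this example, logout from Example #7), a minimal end-to-end sketch might look like the following; user and pw are assumed to come from test_lib as in the test scripts above.

import adba
from test_lib import *  # assumed to provide user and pw, as in the examples above

connection = adba.Connection(keepAlive=True)

# make sure the server is reachable before authenticating
try:
    connection.ping()
except Exception as e:
    print("exception msg: " + str(e))
    exit()

try:
    connection.auth(user, pw)
    anime = adba.Anime(connection, name="Bleach", load=True)
    anime.add_notification()
finally:
    # release the AniDB UDP session when done
    connection.logout()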
Example #12
import sys
import os
import getopt
from test_lib import *
####################################################
# here starts the stuff that is interesting for you
####################################################

# you only need to import the module
import adba
import threading
from time import time, sleep, strftime, localtime

# lets see the version
print("version: " + str(adba.version))
connection = adba.Connection(log=logwrapper, keepAlive=True)


class ThreadLookUp(threading.Thread):
    def __init__(self, animeName, index):
        super(ThreadLookUp, self).__init__()
        self.animeName = animeName
        self.name = "Thread - " + str(self.animeName) + " - " + str(index)
        print(self.name + " started")

    def run(self):
        if not connection.authed():
            log_function("authenticating in thread: " + self.getName())
            if user and pw:
                connection.auth(user, pw)
        else:
Example #13
import sys
import os
import getopt
from test_lib import *
####################################################
# here starts the stuff that is interesting for you
####################################################

# you only need to import the module
import adba
import threading
from time import time, sleep, strftime, localtime

# lets see the version
print(("version: " + str(adba.version)))
connection = adba.Connection(keepAlive=True)


class ThreadLookUp(threading.Thread):
    def __init__(self, animeName, index):
        super(ThreadLookUp, self).__init__()
        self.animeName = animeName
        self.name = "Thread - " + str(self.animeName) + " - " + str(index)
        print(self.name + " started")

    def run(self):
        if not connection.authed():
            log_function("authenticating in thread: " + self.getName())
            if user and pw:
                connection.auth(user, pw)
        else:
Example #14
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.logFile = self.srConfig.LOG_FILE
        self.srLogger.debugLogging = self.srConfig.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE

        # start logger
        self.srLogger.start()

        # user agent
        if self.srConfig.RANDOM_USER_AGENT:
            self.USER_AGENT = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.USER_AGENT

        # Check available space
        try:
            total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
            if available_space < 100:
                self.srLogger.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                return
        except Exception:
            self.srLogger.error('Failed getting disk space: %s',
                                traceback.format_exc())

        # perform database startup actions
        for db in [self.mainDB, self.cacheDB, self.failedDB]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not self.srConfig.DEVELOPER and self.srConfig.LAST_DB_COMPACT < time.time(
        ) - 604800:  # 7 days
            self.mainDB.compact()
            self.srConfig.LAST_DB_COMPACT = int(time.time())

        # load name cache
        self.NAMECACHE.load()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.CACHE_DIR, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:

            def anidb_logger(msg):
                return self.srLogger.debug("AniDB: {} ".format(msg))

            try:
                self.ADBA_CONNECTION = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.ADBA_CONNECTION.auth(self.srConfig.ANIDB_USERNAME,
                                          self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = self.BACKLOGSEARCHER.get_backlog_cycle_time(
        )
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'days':
                    1,
                    'start_date':
                    datetime.datetime.now().replace(
                        hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add show next episode job
        self.srScheduler.add_job(self.SHOWUPDATER.nextEpisode,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="SHOWNEXTEP",
                                 id="SHOWNEXTEP")

        # add daily search job
        self.srScheduler.add_job(self.DAILYSEARCHER.run,
                                 srIntervalTrigger(
                                     **{
                                         'minutes':
                                         self.srConfig.DAILY_SEARCHER_FREQ,
                                         'min':
                                         self.srConfig.MIN_DAILY_SEARCHER_FREQ,
                                         'start_date':
                                         datetime.datetime.now() +
                                         datetime.timedelta(minutes=4)
                                     }),
                                 name="DAILYSEARCHER",
                                 id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes':
                    self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min':
                    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
                    'start_date':
                    datetime.datetime.now() + datetime.timedelta(minutes=30)
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start queue's
        self.SEARCHQUEUE.start()
        self.SHOWQUEUE.start()

        # start webserver
        self.srWebServer.start()