Example #1
class GamesService(WebService):
    _name = "Games Service"
    _id = "games"

    def __init__(self, Routes):
        self.scheduler = TornadoScheduler(timezone=utc)
        # self.scheduler.add_job(self.score_games)
        self.scheduler.add_job(self.score_games, 'interval', hours=1)
        self.emitter = ipc.Emitter()
        self.listener = ipc.Listener({
            "games:getScores": self.score_games
        })
        super().__init__(Routes)

    
    def score_games(self, date=None):
        today = datetime_for_game(date)
        print("games for {}".format(str(today)))    
        games = NBAGame.find_sync({"Date": today})
        print("imported {} games today".format(len(games)))    
        box_results = []
        score_results = []
        game_count = 0
        for game in games:
            box_results.append(nba_jobs.box_score.delay(game["fid"]))
        for result in box_results:
            score_results.append(nba_jobs.score_players.delay(result.get()))
        for result in score_results:
            playerScoring = result.get()
            self.emitter.publish("games:scores", playerScoring)
            game_count += 1
        print("scored {} games".format(game_count))
Example #2
class SnapshotUtil(object):
    instance = None

    def __init__(self):
        self.scheduler = TornadoScheduler()

    @staticmethod
    def get_instance():
        if not SnapshotUtil.instance:
            SnapshotUtil.instance = SnapshotUtil()
        return SnapshotUtil.instance


    def set_snapshot_interval(self, pos, hours):
        self.scheduler.reschedule_job('snapshot_%d' % pos, trigger='interval', hours=hours)


    def snapshot_start(self):
        self.scheduler.start()


    def snapshot_stop(self):
        self.scheduler.shutdown()

    def snapshot(self, pos, url):
        print "snapshot", pos, url
        path = os.path.join(os.getcwd(), CAPTURED_DIR)
        filename = "%s_%s.jpeg" % (pos, time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime()))
        path = os.path.join(path, filename)
        command = "sudo ffmpeg -ss 30 -i '%s' -y -t 30 -r 1 -f image2 %s" % (url, path)
        print command
        os.system(command)
        time.sleep(5)
        add_caputured_image(os.path.basename(path), pos, 1920, 1080)
        self.update_image(path, pos)


    def update_image(self, path, pos):
        datagen, headers = multipart_encode({
            'file': open(path, "rb"),
            'pos': pos,
            'mac': get_mac_address(),
            'created_at': time.time(),
        })
        url = 'http://%s:%d%s' % (SERVER_WEBSITE, API_PORT, UPLOAD_IMG_URL)
        print url
        request = urllib2.Request(url, datagen, headers)
        res = urllib2.urlopen(request, timeout=30).read()
        print res  # the server replies "ok" on success
        return res == "ok"

    def remove_snapshot(self, pos):
        self.scheduler.remove_job("snapshot_%d" % pos)

    def add_snapshot(self, pos, url, hours):
        self.scheduler.add_job(self.snapshot, 'interval', args=[pos, url],
                               hours=hours, id="snapshot_%d" % pos)

    def init_snapshot(self):
        positions = get_positions()
        for pos in positions:
            self.add_snapshot(int(pos['position']), pos['ip_address'], pos['duration'])
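SnapshotUtil keys every job to an explicit id so it can be rescheduled or cancelled later. A minimal sketch of that id-based lifecycle, assuming APScheduler 3.x ('snapshot_1' here is illustrative):

from apscheduler.schedulers.tornado import TornadoScheduler

scheduler = TornadoScheduler()
scheduler.add_job(lambda: print('snap'), 'interval', seconds=100, id='snapshot_1')
scheduler.start()

# change the running job's trigger in place, keeping the same id
scheduler.reschedule_job('snapshot_1', trigger='interval', hours=2)

# cancel the job entirely
scheduler.remove_job('snapshot_1')
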
Example #3
def main():
    # Schedule the imported log_theta job to run every 60 minutes
    scheduler = TornadoScheduler()
    scheduler.add_job(log_theta, 'interval', minutes=60)
    scheduler.start()
    application.listen(settings["listen.port"])
    tornado.ioloop.IOLoop.instance().start()
Example #4
class Application(tornado.web.Application):

    def __init__(self, with_celery, model):
        tornado.web.Application.__init__(self, urls, **settings)
        self.mongo_client = motor.motor_tornado.MotorClient(
            '/usr/local/var/run/mongodb/mongodb-27017.sock')
        self.db = self.mongo_client.ubernow
        self.redis = redis.StrictRedis(unix_socket_path='/usr/local/var/run/redis/redis.sock')
        self.scheduler = TornadoScheduler(
            jobstores=JOBSTORES, executors=EXECUTORS, job_defaults=JOB_DEFAULTS, timezone=utc)
        self.scheduler.start()
        self.model = model
        self.with_celery = with_celery
Example #5
    def __init__(self):
        self._jobs = []
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    # --- * PROJECT * ---
                    (r'/', ListProjectsHandler),
                    (r'/project/(.+)', ProjectDetailsHandler), # project id

                    # --- * JOB * ---
                    (r'/exec/([^/]+)/([^/]+)', ExecuteProjectJobHandler), # project id, job id
                    # View job configuration context. Args: job id
                    (r'/job/([^/]+)/configuration', JobConfigurationHandler),

                    # --- * CONFIGURATION * ---
                    # Change branch 
                    (r'/change/branch', ChangeBranchHandler),
                    ],
                template_path = os.path.join(os.path.dirname(__file__), '.', 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), '.', 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self.scheduler.start()
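The executors dict used here mixes both accepted forms: a plain configuration dict and an instantiated executor. A sketch of how a job then opts into the named process pool, assuming APScheduler 3.x (report_job is an illustrative name):

from apscheduler.executors.pool import ProcessPoolExecutor
from apscheduler.schedulers.tornado import TornadoScheduler

executors = {
    'default': {'type': 'threadpool', 'max_workers': 20},
    'processpool': ProcessPoolExecutor(max_workers=5),
}


def report_job():
    print('running in the process pool')


scheduler = TornadoScheduler()
scheduler.configure(executors=executors)
# jobs run on 'default' unless they name another executor explicitly;
# process-pool jobs must be picklable, hence the top-level function
scheduler.add_job(report_job, 'interval', minutes=5, executor='processpool')
scheduler.start()
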
Example #6
def tornado_schedule():
    from tornado.ioloop import IOLoop
    from apscheduler.schedulers.tornado import TornadoScheduler

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #7
def main():
    tornado.options.parse_config_file(os.path.join(os.path.dirname(__file__), "config.py"))
    tornado.options.parse_command_line()
    global app
    app = Application()
    app.iphone_data = {}

    scraper_all()
    scheduler = TornadoScheduler()
    time_interval = 60
    scheduler.add_job(scraper_all, 'interval', seconds=time_interval)
    scheduler.add_job(update_data, 'interval', seconds=time_interval)
    scheduler.start()

    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
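Here scraper_all() is called once up front so data exists before the first interval fires. APScheduler can express the same intent in the job itself via next_run_time; a sketch, assuming APScheduler 3.x (scrape is an illustrative name):

import datetime

from apscheduler.schedulers.tornado import TornadoScheduler


def scrape():
    print('scraping')


scheduler = TornadoScheduler()
# next_run_time=now makes the job fire immediately, then every 60 seconds
scheduler.add_job(scrape, 'interval', seconds=60,
                  next_run_time=datetime.datetime.now())
scheduler.start()
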
Example #8
    def __init__(self):
        self._jobs = []
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    (r'/', IndexHandler),
                    # --- * CONFIGURATION * ---
                    # Context of resource configuration file. Args: resource configuration filename
                    (r'/config/resource/([^/]+)', ResourceConfigHandler),
                    # Context of translation configuration file. Args: translation configuration filename
                    (r'/config/translation/([^/]+)', TranslationConfigHandler),

                    # --- * DASHBOARD * ---
                    # Dashboard.
                    (r'/dashboard', DashboardHandler),

                    # --- * JOB * ---
                    # List of jobs.
                    (r'/jobs', ListJobsHandler),
                    # List resource slugs for resources in a project. Args: job id
                    (r'/job/([^/]+)/check/slugs', CheckSlugsHandler),
                    # Details of a job. Args: job id
                    (r'/job/([^/]+)/details', JobDetailsHandler),

                    # --- * LOG * ---
                    # Context of most recent log for a job. Args: job id
                    (r'/log/([^/]+)/context', LogContextHandler),

                    # --- * PROJECT * ---
                    # List of projects.
                    (r'/projects', ListProjectsHandler),
                    # Details of a project. Args: project id
                    (r'/project/([^/]+)/details', ProjectDetailsHandler),

                    # --- * TRANSLATION PLATFORM * ---
                    # List of projects in translation platform (e.g. Transifex projects). Args: translation platform name
                    (r'/translation/([^/]+)/projects', ListTranslationProjects),
                    # Details of a project in translation platform. Args: translation platform name, project slug
                    (r'/translation/([^/]+)/project/([^/]+)/details', TranslationProjectDetails),
                    # List of all translation strings for a resource of a language. Args: translation platform name, project slug, resource slug, language code
                    (r'/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/strings', TranslationProjectTranslationStrings),
                    # Details of a source string. Args: translation platform name, project slug, resource slug
                    (r'/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/source/strings', TranslationProjectSourceStringDetails)
                ],
                template_path = os.path.join(os.path.dirname(__file__), '.', 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), '.', 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self.scheduler.start()
Example #9
    def __init__(self, Routes):
        self.scheduler = TornadoScheduler(timezone=utc)
        # self.scheduler.add_job(self.score_games)
        self.scheduler.add_job(self.score_games, 'interval', hours=1)
        self.emitter = ipc.Emitter()
        self.listener = ipc.Listener({
            "games:getScores": self.score_games
        })
        super().__init__(Routes)
Example #10
class ProjectManagerServer():
    def __init__(self):
        self._jobs = []
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    # --- * PROJECT * ---
                    (r'/', ListProjectsHandler),
                    (r'/project/(.+)', ProjectDetailsHandler), # project id

                    # --- * JOB * ---
                    (r'/exec/([^/]+)/([^/]+)', ExecuteProjectJobHandler), # project id, job id
                    # View job configuration context. Args: job id
                    (r'/job/([^/]+)/configuration', JobConfigurationHandler),

                    # --- * CONFIGURATION * ---
                    # Change branch 
                    (r'/change/branch', ChangeBranchHandler),
                    ],
                template_path = os.path.join(os.path.dirname(__file__), '.', 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), '.', 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self.scheduler.start()

    # @classmethod
    def start(self):
        signal.signal(signal.SIGINT, self._signal_handler)
        self.http_server.listen(settings.HTTP_PORT)
        tornado.ioloop.IOLoop.current().start()

    def _signal_handler(self, signal_type, frame):
        if signal_type == signal.SIGINT:
            logger.info('SIGINT')
        else:
            logger.warning('Unknown signal')

        self.terminate()

    # @classmethod
    def terminate(self):
        logger.info('Stopping console...')
        self.scheduler.shutdown()
        tornado.ioloop.IOLoop.current().stop()
        sys.exit(0)
Example #11
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # cpu count
        self.CPU_COUNT = cpu_count()

        # generate notifiers dict
        self.notifiersDict = AttrDict(libnotify=LibnotifyNotifier(),
                                      kodi_notifier=KODINotifier(),
                                      plex_notifier=PLEXNotifier(),
                                      emby_notifier=EMBYNotifier(),
                                      nmj_notifier=NMJNotifier(),
                                      nmjv2_notifier=NMJv2Notifier(),
                                      synoindex_notifier=synoIndexNotifier(),
                                      synology_notifier=synologyNotifier(),
                                      pytivo_notifier=pyTivoNotifier(),
                                      growl_notifier=GrowlNotifier(),
                                      prowl_notifier=ProwlNotifier(),
                                      libnotify_notifier=LibnotifyNotifier(),
                                      pushover_notifier=PushoverNotifier(),
                                      boxcar_notifier=BoxcarNotifier(),
                                      boxcar2_notifier=Boxcar2Notifier(),
                                      nma_notifier=NMA_Notifier(),
                                      pushalot_notifier=PushalotNotifier(),
                                      pushbullet_notifier=PushbulletNotifier(),
                                      freemobile_notifier=FreeMobileNotifier(),
                                      twitter_notifier=TwitterNotifier(),
                                      trakt_notifier=TraktNotifier(),
                                      email_notifier=EmailNotifier())

        # generate metadata providers dict
        self.metadataProviderDict = get_metadata_generator_dict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []
Example #12
def serve():
    """Read configuration and start the server."""
    global EMAIL_FROM, SMTP_SETTINGS
    jobstores = {'default': SQLAlchemyJobStore(url=JOBS_STORE)}
    scheduler = TornadoScheduler(jobstores=jobstores, timezone=pytz.utc)
    scheduler.start()

    define('port', default=3210, help='run on the given port', type=int)
    define('address',
           default='',
           help='bind the server at the given address',
           type=str)
    define('ssl_cert',
           default=os.path.join(os.path.dirname(__file__), 'ssl',
                                'diffido_cert.pem'),
           help='specify the SSL certificate to use for secure connections')
    define('ssl_key',
           default=os.path.join(os.path.dirname(__file__), 'ssl',
                                'diffido_key.pem'),
           help='specify the SSL private key to use for secure connections')
    define('admin-email',
           default='',
           help='email address of the site administrator',
           type=str)
    define('smtp-host',
           default='localhost',
           help='SMTP server address',
           type=str)
    define('smtp-port', default=0, help='SMTP server port', type=int)
    define('smtp-local-hostname',
           default=None,
           help='SMTP local hostname',
           type=str)
    define('smtp-use-ssl',
           default=False,
           help='Use SSL to connect to the SMTP server',
           type=bool)
    define('smtp-starttls',
           default=False,
           help='Use STARTTLS to connect to the SMTP server',
           type=bool)
    define('smtp-ssl-keyfile', default=None, help='SSL key file', type=str)
    define('smtp-ssl-certfile', default=None, help='SSL cert file', type=str)
    define('smtp-ssl-context', default=None, help='SSL context', type=str)
    define('debug', default=False, help='run in debug mode', type=bool)
    define('config',
           help='read configuration file',
           callback=lambda path: tornado.options.parse_config_file(
               path, final=False))
    if not options.config and os.path.isfile(DEFAULT_CONF):
        tornado.options.parse_config_file(DEFAULT_CONF, final=False)
    tornado.options.parse_command_line()
    if options.admin_email:
        EMAIL_FROM = options.admin_email

    for key, value in options.as_dict().items():
        if key.startswith('smtp-'):
            SMTP_SETTINGS[key] = value

    if options.debug:
        logger.setLevel(logging.DEBUG)

    ssl_options = {}
    if os.path.isfile(options.ssl_key) and os.path.isfile(options.ssl_cert):
        ssl_options = dict(certfile=options.ssl_cert, keyfile=options.ssl_key)

    init_params = dict(listen_port=options.port,
                       logger=logger,
                       ssl_options=ssl_options,
                       scheduler=scheduler)
    git_init()

    _reset_schedules_path = r'schedules/reset'
    _schedule_run_path = r'schedules/(?P<id_>\d+)/run'
    _schedules_path = r'schedules/?(?P<id_>\d+)?'
    _history_path = r'schedules/?(?P<id_>\d+)/history'
    _diff_path = r'schedules/(?P<id_>\d+)/diff/(?P<commit_id>[0-9a-f]+)/?(?P<old_commit_id>[0-9a-f]+)?/?'
    application = tornado.web.Application([
        (r'/api/%s' % _reset_schedules_path, ResetSchedulesHandler,
         init_params),
        (r'/api/v%s/%s' % (API_VERSION, _reset_schedules_path),
         ResetSchedulesHandler, init_params),
        (r'/api/%s' % _schedule_run_path, RunScheduleHandler, init_params),
        (r'/api/v%s/%s' %
         (API_VERSION, _schedule_run_path), RunScheduleHandler, init_params),
        (r'/api/%s' % _history_path, HistoryHandler, init_params),
        (r'/api/v%s/%s' %
         (API_VERSION, _history_path), HistoryHandler, init_params),
        (r'/api/%s' % _diff_path, DiffHandler, init_params),
        (r'/api/v%s/%s' % (API_VERSION, _diff_path), DiffHandler, init_params),
        (r'/api/%s' % _schedules_path, SchedulesHandler, init_params),
        (r'/api/v%s/%s' %
         (API_VERSION, _schedules_path), SchedulesHandler, init_params),
        (r'/?(.*)', TemplateHandler, init_params),
    ],
                                          static_path=os.path.join(
                                              os.path.dirname(__file__),
                                              'dist/static'),
                                          template_path=os.path.join(
                                              os.path.dirname(__file__),
                                              'dist/'),
                                          debug=options.debug)
    http_server = tornado.httpserver.HTTPServer(application,
                                                ssl_options=ssl_options
                                                or None)
    logger.info('Start serving on %s://%s:%d',
                'https' if ssl_options else 'http',
                options.address if options.address else '127.0.0.1',
                options.port)
    http_server.listen(options.port, options.address)
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
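serve() is the only example in this list that persists jobs: with an SQLAlchemyJobStore, schedules and next run times survive a restart. A minimal sketch, assuming APScheduler 3.x with SQLAlchemy installed (the sqlite URL is illustrative):

import pytz
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.tornado import TornadoScheduler

# jobs are written to the database, so a restarted process
# picks up where the previous one left off
jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')}
scheduler = TornadoScheduler(jobstores=jobstores, timezone=pytz.utc)
scheduler.start()
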
Example #13
from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler

import atexit
from datetime import date

# port = 10100

import argparse
parser = argparse.ArgumentParser(description='Run central migration service.')
parser.add_argument('port', type=int, help='The port to listen on.')
args = parser.parse_args()
port = args.port


# https://stackoverflow.com/questions/21214270/scheduling-a-function-to-run-every-hour-on-flask
# garbage collection scheduler
def garbage_collect():
    print('running on port {}'.format(port))


if __name__ == "__main__":
    gc_scheduler = TornadoScheduler()
    gc_scheduler.add_job(garbage_collect, 'interval', seconds=3)
    gc_scheduler.start()

    create_central_migration_service().listen(port)
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
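The snippet imports atexit but never registers a handler; a plausible completion (an assumption, not part of the source) shuts the scheduler down at interpreter exit:

import atexit

# hypothetical: stop gc_scheduler cleanly when the process exits,
# without waiting for in-flight jobs to finish
atexit.register(lambda: gc_scheduler.shutdown(wait=False))
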
Example #14
    def __init__(self):
        self.started = False
        self.io_loop = IOLoop.current()

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = notifiersDict()

        # generate metadata providers dict
        self.metadataProvidersDict = metadataProvidersDict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []
Example #15
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = None
        self.pid = os.getpid()
        self.showlist = []

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
                                     1: [" - " + x for x in self.naming_ep_type],
                                     2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.event_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: {}!".format(("FAILED", "SUCCESSFUL")[success]))
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(
                days=1,
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(
                minutes=15,
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(
                days=1,
            ),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(('http', 'https')[sickrage.app.config.enable_https],
                                                               sickrage.app.config.web_host,
                                                               sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # shutdown event queue
            if self.event_queue:
                self.log.debug("Shutting down event queue")
                self.event_queue.shutdown()
                del self.event_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        if self.io_loop:
            self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.save_to_db()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist and quicksearch cache with shows and episodes from the database
        """

        self.quicksearch_cache.load()

        for dbData in self.main_db.all('tv_shows'):
            show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))

            try:
                self.log.debug("Loading data for show: [{}]".format(show.name))
                self.showlist.append(show)
                self.quicksearch_cache.add_show(show.indexerid)
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" % (show.location, str(e)))
Example #16
from datetime import datetime
import os

from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #17
    def __init__(self):  # card type, card face
        self.m_scheduler = TornadoScheduler()
        self.start()
Example #18
def index_container_packages(docker_client):
    for container in docker_client.containers():
        indexer = DockerPackageIndexer(container, docker_client)
        indexer.index()

if __name__ == '__main__':
    tornado.log.enable_pretty_logging()
    tornado.options.parse_command_line()

    try:
        docker_client = Client(**kwargs_from_env(assert_hostname=False))
        docker_client.ping()
    except ConnectionError:
        logging.error("Unable to connect to Docker. Ensure Docker is running and environment variables are set.")
        exit(1)

    scheduler = TornadoScheduler()
    scheduler.add_job(lambda: index_container_packages(docker_client), 'interval', seconds=tornado.options.options.interval)
    scheduler.start()

    app = tornado.web.Application([
        (r"/", SearchHandler),
    ], debug=tornado.options.options.reload)

    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(tornado.options.options.port)

    try:
        tornado.ioloop.IOLoop.current().start()
    except (KeyboardInterrupt, SystemExit):
        pass
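The indexer script reads interval, port and reload from tornado.options, but the define() calls are not shown; a sketch of what they could look like (the defaults are assumptions):

from tornado.options import define

define('interval', default=60, type=int,
       help='seconds between package-indexing runs')
define('port', default=8888, type=int, help='port for the search API')
define('reload', default=False, type=bool, help='enable Tornado autoreload')
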
Example #19
    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init sentry
        self.init_sentry()

        # scheduler
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})

        # init core classes
        self.api = API()
        self.config = Config(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.notification_providers = NotificationProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.series_providers = SeriesProviders()
        self.log = Logger()
        self.alerts = Notifications()
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.announcements = Announcements()
        self.amqp_consumer = AMQPConsumer()

        # authorization sso client
        self.auth_server = AuthServer()

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            self.log.info('Performing restore of backup files')
            success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                # remove restore files
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # setup databases
        self.main_db.setup()
        self.config.db.setup()
        self.cache_db.setup()

        # load config
        self.config.load()

        # migrate config
        self.config.migrate_config_file(self.config_file)

        # add server id tag to sentry
        sentry_sdk.set_tag('server_id', self.config.general.server_id)

        # add user to sentry
        sentry_sdk.set_user({
            'id': self.config.user.sub_id,
            'username': self.config.user.username,
            'email': self.config.user.email
        })

        # config overrides
        if self.web_port:
            self.config.general.web_port = self.web_port
        if self.web_root:
            self.config.general.web_root = self.web_root

        # set language
        change_gui_lang(self.config.gui.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.general.socket_timeout)

        # set ssl cert/key filenames
        self.https_cert_file = os.path.abspath(os.path.join(self.data_dir, 'server.crt'))
        self.https_key_file = os.path.abspath(os.path.join(self.data_dir, 'server.key'))

        # setup logger settings
        self.log.logSize = self.config.general.log_size
        self.log.logNr = self.config.general.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.debug or self.config.general.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.general.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        if self.config.general.default_page not in DefaultHomePage:
            self.config.general.default_page = DefaultHomePage.HOME

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.general.anon_redirect.endswith('?'):
            self.config.general.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.general.root_dirs):
            self.config.general.root_dirs = ''

        self.naming_force_folders = check_force_season_folders()

        if self.config.general.nzb_method not in NzbMethod:
            self.config.general.nzb_method = NzbMethod.BLACKHOLE

        if self.config.general.torrent_method not in TorrentMethod:
            self.config.general.torrent_method = TorrentMethod.BLACKHOLE

        if self.config.general.auto_postprocessor_freq < self.min_auto_postprocessor_freq:
            self.config.general.auto_postprocessor_freq = self.min_auto_postprocessor_freq

        if self.config.general.daily_searcher_freq < self.min_daily_searcher_freq:
            self.config.general.daily_searcher_freq = self.min_daily_searcher_freq

        if self.config.general.backlog_searcher_freq < self.min_backlog_searcher_freq:
            self.config.general.backlog_searcher_freq = self.min_backlog_searcher_freq

        if self.config.general.version_updater_freq < self.min_version_updater_freq:
            self.config.general.version_updater_freq = self.min_version_updater_freq

        if self.config.general.subtitle_searcher_freq < self.min_subtitle_searcher_freq:
            self.config.general.subtitle_searcher_freq = self.min_subtitle_searcher_freq

        if self.config.failed_snatches.age < self.min_failed_snatch_age:
            self.config.failed_snatches.age = self.min_failed_snatch_age

        if self.config.general.proper_searcher_interval not in CheckPropersInterval:
            self.config.general.proper_searcher_interval = CheckPropersInterval.DAILY

        if self.config.general.show_update_hour < 0 or self.config.general.show_update_hour > 23:
            self.config.general.show_update_hour = 0

        # add app updater job
        self.scheduler.add_job(
            self.version_updater.task,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.task,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.general.show_update_hour),
                timezone='utc'
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.task,
            IntervalTrigger(
                minutes=15,
                timezone='utc'
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.task,
            IntervalTrigger(
                minutes=self.config.general.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.task,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.task,
            IntervalTrigger(
                minutes=self.config.general.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30),
                timezone='utc'
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.task,
            IntervalTrigger(
                minutes=self.config.general.auto_postprocessor_freq,
                timezone='utc'
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.task,
            IntervalTrigger(minutes=self.config.general.proper_searcher_interval.value, timezone='utc'),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.task,
            IntervalTrigger(
                hours=1,
                timezone='utc'
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.task,
            IntervalTrigger(
                hours=self.config.general.subtitle_searcher_freq,
                timezone='utc'
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.task,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime,
                timezone='utc'
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # start queues
        self.search_queue.start_worker(self.config.general.max_queue_workers)
        self.show_queue.start_worker(self.config.general.max_queue_workers)
        self.postprocessor_queue.start_worker(self.config.general.max_queue_workers)

        # start web server
        self.wserver.start()

        # start scheduler service
        self.scheduler.start()

        # perform server checkup
        IOLoop.current().add_callback(self.server_checkup)

        # load shows
        IOLoop.current().add_callback(self.load_shows)

        # perform version update check
        IOLoop.current().spawn_callback(self.version_updater.check_for_update)

        # load network timezones
        IOLoop.current().spawn_callback(self.tz_updater.update_network_timezones)

        # load search provider urls
        IOLoop.current().spawn_callback(self.search_providers.update_urls)

        # startup message
        IOLoop.current().add_callback(self.startup_message)

        # launch browser
        IOLoop.current().add_callback(self.launch_browser)

        # watch websocket message queue
        PeriodicCallback(check_web_socket_queue, 100).start()

        # perform server checkups every hour
        PeriodicCallback(self.server_checkup, 1 * 60 * 60 * 1000).start()

        # perform shutdown trigger check every 5 seconds
        PeriodicCallback(self.shutdown_trigger, 5 * 1000).start()

        # start ioloop
        IOLoop.current().start()
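Every registration above follows the same shape, so a stripped-down, self-contained sketch may help isolate the pattern: a TornadoScheduler shares Tornado's IOLoop, IntervalTrigger jobs are staggered via start_date so nothing heavy fires during boot, and PeriodicCallback handles fast polling. Job names and intervals below are illustrative, not taken from the code above.

import datetime

from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.triggers.interval import IntervalTrigger
from tornado.ioloop import IOLoop, PeriodicCallback


def search_task():
    print('searching...')


def poll_task():
    print('polling...')


if __name__ == '__main__':
    scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})

    # stagger the first run by 4 minutes so the app can finish booting first
    scheduler.add_job(
        search_task,
        IntervalTrigger(
            minutes=40,
            start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
            timezone='utc'
        ),
        name='search_task',
        id='search_task'
    )

    scheduler.start()

    # sub-minute polling goes to PeriodicCallback (interval in milliseconds)
    PeriodicCallback(poll_task, 5 * 1000).start()

    IOLoop.current().start()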
Exemplo n.º 20
0
class Core(object):
    def __init__(self):
        self.started = False
        self.loading_shows = False
        self.daemon = None
        self.pid = os.getpid()

        self.gui_static_dir = os.path.join(sickrage.PROG_DIR, 'core', 'webserver', 'static')
        self.gui_views_dir = os.path.join(sickrage.PROG_DIR, 'core', 'webserver', 'views')
        self.gui_app_dir = os.path.join(sickrage.PROG_DIR, 'core', 'webserver', 'app')

        self.https_cert_file = None
        self.https_key_file = None

        self.trakt_api_key = '5c65f55e11d48c35385d9e8670615763a605fad28374c8ae553a7b7a50651ddd'
        self.trakt_api_secret = 'b53e32045ac122a445ef163e6d859403301ffe9b17fb8321d428531b69022a82'
        self.trakt_app_id = '4562'

        self.fanart_api_key = '9b3afaf26f6241bdb57d6cc6bd798da7'

        self.git_remote = "origin"
        self.git_remote_url = "https://git.sickrage.ca/SiCKRAGE/sickrage"

        self.unrar_tool = rarfile.UNRAR_TOOL

        self.naming_force_folders = False

        self.min_auto_postprocessor_freq = 1
        self.min_daily_searcher_freq = 10
        self.min_backlog_searcher_freq = 10
        self.min_version_updater_freq = 1
        self.min_subtitle_searcher_freq = 1
        self.min_failed_snatch_age = 1

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.shows = {}
        self.shows_recent = deque(maxlen=5)

        self.main_db = None
        self.cache_db = None

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.disable_updates = None
        self.web_port = None
        self.web_host = None
        self.web_root = None
        self.developer = None
        self.db_type = None
        self.db_prefix = None
        self.db_host = None
        self.db_port = None
        self.db_username = None
        self.db_password = None
        self.debug = None
        self.latest_version_string = None

        self.naming_ep_type = (
            "%(seasonnumber)dx%(episodenumber)02d",
            "s%(seasonnumber)02de%(episodenumber)02d",
            "S%(seasonnumber)02dE%(episodenumber)02d",
            "%(seasonnumber)02dx%(episodenumber)02d",
            "S%(seasonnumber)02d E%(episodenumber)02d"
        )

        self.sports_ep_type = (
            "%(seasonnumber)dx%(episodenumber)02d",
            "s%(seasonnumber)02de%(episodenumber)02d",
            "S%(seasonnumber)02dE%(episodenumber)02d",
            "%(seasonnumber)02dx%(episodenumber)02d",
            "S%(seasonnumber)02 dE%(episodenumber)02d"
        )

        self.naming_ep_type_text = (
            "1x02",
            "s01e02",
            "S01E02",
            "01x02",
            "S01 E02"
        )

        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }

        self.naming_multi_ep_type_text = (
            "extend",
            "duplicate",
            "repeat"
        )

        self.naming_sep_type = (
            " - ",
            " "
        )

        self.naming_sep_type_text = (
            " - ",
            "space"
        )

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.notification_providers = {}
        self.metadata_providers = {}
        self.search_providers = {}
        self.series_providers = {}

        self.adba_connection = None
        self.log = None
        self.config = None
        self.alerts = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.failed_snatch_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.auth_server = None
        self.announcements = None
        self.api = None
        self.amqp_consumer = None

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init sentry
        self.init_sentry()

        # scheduler
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})

        # init core classes
        self.api = API()
        self.config = Config(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host, self.db_port, self.db_username, self.db_password)
        self.notification_providers = NotificationProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.series_providers = SeriesProviders()
        self.log = Logger()
        self.alerts = Notifications()
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.announcements = Announcements()
        self.amqp_consumer = AMQPConsumer()

        # authorization sso client
        self.auth_server = AuthServer()

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            self.log.info('Performing restore of backup files')
            success = restore_app_data(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                # remove restore files
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # setup databases
        self.main_db.setup()
        self.config.db.setup()
        self.cache_db.setup()

        # load config
        self.config.load()

        # migrate config
        self.config.migrate_config_file(self.config_file)

        # add server id tag to sentry
        sentry_sdk.set_tag('server_id', self.config.general.server_id)

        # add user to sentry
        sentry_sdk.set_user({
            'id': self.config.user.sub_id,
            'username': self.config.user.username,
            'email': self.config.user.email
        })

        # config overrides
        if self.web_port:
            self.config.general.web_port = self.web_port
        if self.web_root:
            self.config.general.web_root = self.web_root

        # set language
        change_gui_lang(self.config.gui.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.general.socket_timeout)

        # set ssl cert/key filenames
        self.https_cert_file = os.path.abspath(os.path.join(self.data_dir, 'server.crt'))
        self.https_key_file = os.path.abspath(os.path.join(self.data_dir, 'server.key'))

        # setup logger settings
        self.log.logSize = self.config.general.log_size
        self.log.logNr = self.config.general.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.debug or self.config.general.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.general.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        if self.config.general.default_page not in DefaultHomePage:
            self.config.general.default_page = DefaultHomePage.HOME

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.general.anon_redirect.endswith('?'):
            self.config.general.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.general.root_dirs):
            self.config.general.root_dirs = ''

        self.naming_force_folders = check_force_season_folders()

        if self.config.general.nzb_method not in NzbMethod:
            self.config.general.nzb_method = NzbMethod.BLACKHOLE

        if self.config.general.torrent_method not in TorrentMethod:
            self.config.general.torrent_method = TorrentMethod.BLACKHOLE

        if self.config.general.auto_postprocessor_freq < self.min_auto_postprocessor_freq:
            self.config.general.auto_postprocessor_freq = self.min_auto_postprocessor_freq

        if self.config.general.daily_searcher_freq < self.min_daily_searcher_freq:
            self.config.general.daily_searcher_freq = self.min_daily_searcher_freq

        if self.config.general.backlog_searcher_freq < self.min_backlog_searcher_freq:
            self.config.general.backlog_searcher_freq = self.min_backlog_searcher_freq

        if self.config.general.version_updater_freq < self.min_version_updater_freq:
            self.config.general.version_updater_freq = self.min_version_updater_freq

        if self.config.general.subtitle_searcher_freq < self.min_subtitle_searcher_freq:
            self.config.general.subtitle_searcher_freq = self.min_subtitle_searcher_freq

        if self.config.failed_snatches.age < self.min_failed_snatch_age:
            self.config.failed_snatches.age = self.min_failed_snatch_age

        if self.config.general.proper_searcher_interval not in CheckPropersInterval:
            self.config.general.proper_searcher_interval = CheckPropersInterval.DAILY

        if self.config.general.show_update_hour < 0 or self.config.general.show_update_hour > 23:
            self.config.general.show_update_hour = 0

        # add app updater job
        self.scheduler.add_job(
            self.version_updater.task,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.task,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.general.show_update_hour),
                timezone='utc'
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.task,
            IntervalTrigger(
                minutes=15,
                timezone='utc'
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.task,
            IntervalTrigger(
                minutes=self.config.general.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.task,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4),
                timezone='utc'
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.task,
            IntervalTrigger(
                minutes=self.config.general.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30),
                timezone='utc'
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.task,
            IntervalTrigger(
                minutes=self.config.general.auto_postprocessor_freq,
                timezone='utc'
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.task,
            IntervalTrigger(minutes=self.config.general.proper_searcher_interval.value, timezone='utc'),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.task,
            IntervalTrigger(
                hours=1,
                timezone='utc'
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.task,
            IntervalTrigger(
                hours=self.config.general.subtitle_searcher_freq,
                timezone='utc'
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.task,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime,
                timezone='utc'
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # start queues
        self.search_queue.start_worker(self.config.general.max_queue_workers)
        self.show_queue.start_worker(self.config.general.max_queue_workers)
        self.postprocessor_queue.start_worker(self.config.general.max_queue_workers)

        # start web server
        self.wserver.start()

        # start scheduler service
        self.scheduler.start()

        # perform server checkup
        IOLoop.current().add_callback(self.server_checkup)

        # load shows
        IOLoop.current().add_callback(self.load_shows)

        # perform version update check
        IOLoop.current().spawn_callback(self.version_updater.check_for_update)

        # load network timezones
        IOLoop.current().spawn_callback(self.tz_updater.update_network_timezones)

        # load search provider urls
        IOLoop.current().spawn_callback(self.search_providers.update_urls)

        # startup message
        IOLoop.current().add_callback(self.startup_message)

        # launch browser
        IOLoop.current().add_callback(self.launch_browser)

        # watch websocket message queue
        PeriodicCallback(check_web_socket_queue, 100).start()

        # perform server checkups every hour
        PeriodicCallback(self.server_checkup, 1 * 60 * 60 * 1000).start()

        # perform shutdown trigger check every 5 seconds
        PeriodicCallback(self.shutdown_trigger, 5 * 1000).start()

        # start ioloop
        IOLoop.current().start()

    def init_sentry(self):
        # sentry log handler
        sentry_logging = LoggingIntegration(
            level=logging.INFO,  # Capture info and above as breadcrumbs
            event_level=logging.ERROR  # Send errors as events
        )

        # init sentry logging
        sentry_sdk.init(
            dsn="https://[email protected]/2?verify_ssl=0",
            integrations=[sentry_logging],
            release=sickrage.version(),
            environment=('master', 'develop')['dev' in sickrage.version()],
            ignore_errors=[
                'KeyboardInterrupt',
                'PermissionError',
                'FileNotFoundError',
                'EpisodeNotFoundException'
            ]
        )

        # sentry tags
        sentry_tags = {
            'platform': platform.platform(),
            'locale': repr(locale.getdefaultlocale()),
            'python': platform.python_version(),
            'install_type': sickrage.install_type()
        }

        # set sentry tags
        for tag_key, tag_value in sentry_tags.items():
            sentry_sdk.set_tag(tag_key, tag_value)

        # set loggers to ignore
        ignored_loggers = [
            'enzyme.parsers.ebml.core',
            'subliminal.core',
            'subliminal.utils',
            'subliminal.refiners.tvdb',
            'subliminal.refiners.metadata',
            'subliminal.providers.tvsubtitles',
            'pika.connection',
            'pika.adapters.base_connection',
            'pika.adapters.utils.io_services_utils',
            'pika.adapters.utils.connection_workflow',
            'pika.adapters.utils.selector_ioloop_adapter'
        ]

        for item in ignored_loggers:
            ignore_logger(item)

    def server_checkup(self):
        if self.config.general.server_id:
            server_status = self.api.server.get_status(self.config.general.server_id)
            if server_status and not server_status['registered']:
                # re-register server
                server_id = self.api.server.register_server(
                    ip_addresses=','.join([get_internal_ip()]),
                    web_protocol=('http', 'https')[self.config.general.enable_https],
                    web_port=self.config.general.web_port,
                    web_root=self.config.general.web_root,
                    server_version=sickrage.version(),
                )

                if server_id:
                    self.log.info('Re-registered SiCKRAGE server with SiCKRAGE API')
                    sentry_sdk.set_tag('server_id', self.config.general.server_id)
                    self.config.general.server_id = server_id
                    self.config.save(mark_dirty=True)
            else:
                self.log.debug('Updating SiCKRAGE server data on SiCKRAGE API')

                # update server information
                self.api.server.update_server(
                    server_id=self.config.general.server_id,
                    ip_addresses=','.join([get_internal_ip()]),
                    web_protocol=('http', 'https')[self.config.general.enable_https],
                    web_port=self.config.general.web_port,
                    web_root=self.config.general.web_root,
                    server_version=sickrage.version(),
                )

    def load_shows(self):
        threading.currentThread().setName('CORE')

        session = self.main_db.session()

        self.log.info('Loading initial shows list')

        self.loading_shows = True

        self.shows = {}
        for query in session.query(MainDB.TVShow).with_entities(MainDB.TVShow.series_id, MainDB.TVShow.series_provider_id, MainDB.TVShow.name,
                                                                MainDB.TVShow.location):
            try:
                # if not os.path.isdir(query.location) and self.config.general.create_missing_show_dirs:
                #     make_dir(query.location)

                self.log.info('Loading show {}'.format(query.name))
                self.shows.update({(query.series_id, query.series_provider_id): TVShow(query.series_id, query.series_provider_id)})
            except Exception as e:
                self.log.debug('There was an error loading show {}: {}'.format(query.name, e))

        self.loading_shows = False

        self.log.info('Loading initial shows list finished')

    def startup_message(self):
        self.log.info("SiCKRAGE :: STARTED")
        self.log.info(f"SiCKRAGE :: APP VERSION:[{sickrage.version()}]")
        self.log.info(f"SiCKRAGE :: CONFIG VERSION:[v{self.config.db.version}]")
        self.log.info(f"SiCKRAGE :: DATABASE VERSION:[v{self.main_db.version}]")
        self.log.info(f"SiCKRAGE :: DATABASE TYPE:[{self.db_type}]")
        self.log.info(f"SiCKRAGE :: INSTALL TYPE:[{self.version_updater.updater.type}]")
        self.log.info(
            f"SiCKRAGE :: URL:[{('http', 'https')[self.config.general.enable_https]}://"
            f"{(get_internal_ip(), self.web_host)[self.web_host not in ['', '0.0.0.0']]}:"
            f"{self.config.general.web_port}/{self.config.general.web_root.lstrip('/')}]")

    def launch_browser(self):
        if not self.no_launch and self.config.general.launch_browser:
            launch_browser(protocol=('http', 'https')[self.config.general.enable_https],
                           host=(get_internal_ip(), self.web_host)[self.web_host != ''],
                           startport=self.config.general.web_port)

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS {}!!!'.format(('SHUTTING DOWN', 'RESTARTING')[restart]))

            # shutdown scheduler
            if self.scheduler:
                try:
                    self.scheduler.shutdown()
                except (SchedulerNotRunningError, RuntimeError):
                    pass

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # stop queues
            self.search_queue.shutdown()
            self.show_queue.shutdown()
            self.postprocessor_queue.shutdown()

            # stop amqp consumer
            self.amqp_consumer.stop()

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save shows
            self.log.info('Saving all shows to the database')
            for show in self.shows.values():
                show.save()

            # save settings
            self.config.save()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if self.daemon:
            self.daemon.stop()

        self.started = False

    def restart(self):
        self.shutdown(restart=True)

    def shutdown_trigger(self):
        if not self.started:
            IOLoop.current().stop()
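The shutdown_trigger/PeriodicCallback pairing at the end of start() is a small pattern worth isolating: a flag is polled on the IOLoop every few seconds, and clearing the flag stops the loop without any thread juggling. A minimal sketch, with all names illustrative:

from tornado.ioloop import IOLoop, PeriodicCallback


class App(object):
    def __init__(self):
        self.started = False

    def start(self):
        self.started = True
        # check the shutdown flag every 5 seconds
        PeriodicCallback(self.shutdown_trigger, 5 * 1000).start()
        IOLoop.current().start()

    def shutdown(self):
        # flipping the flag makes the next trigger check stop the loop
        self.started = False

    def shutdown_trigger(self):
        if not self.started:
            IOLoop.current().stop()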
Exemplo n.º 21
0
"""
Demonstrates how to use the Tornado compatible scheduler to schedule a job that
executes on 3 second intervals, plus a daily cron job.
"""

from datetime import datetime
import os

from tornado.ioloop import IOLoop, PeriodicCallback
from apscheduler.schedulers.tornado import TornadoScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


def job():
    print('Every Day Job! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.add_job(job, 'cron', hour=10, minute=25)
    scheduler.start()
    PeriodicCallback(job, 2 * 1000).start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemplo n.º 22
0
class MurakamiServer:
    """
    *MurakamiServer* is responsible for loading all test runner and result
    exporter plugins, as well as managing the main eventloop and loading
    WebThings if desired.

    ####Keyword arguments:
    * `port`: port that WebThingsServer listens on
    * `hostname`: this WebThingServer's hostname
    * `ssl_options`: any TLS options to supply to WebThingServer
    * `additional_routes`: routes to add to the WebThingServer
    * `base_path`: path to add to URL where we're listening in case we're
    behind a proxy
    * `tests_per_day`: number of tests to run in a day
    * `immediate`: whether to run the tests once immediately on startup
    * `webthings`: whether to expose the test runners via a WebThingServer
    * `location`: string describing physical location of this device
    * `network_type`: string describing the network this device is connected to
    * `connection_type`: string describing type of connection this device is
    using
    * `device_id`: string uniquely identifying this device
    * `config`: parsed configuration dictionary
    """
    def __init__(
        self,
        port=defaults.HTTP_PORT,
        hostname=None,
        ssl_options=None,
        additional_routes=None,
        base_path="",
        tests_per_day=defaults.TESTS_PER_DAY,
        immediate=False,
        webthings=False,
        location=None,
        network_type=None,
        connection_type=None,
        device_id=None,
        config=None,
    ):
        self._runners = {}
        self._exporters = {}

        self._scheduler = None
        self._server = None

        self._port = port
        self._hostname = hostname
        self._ssl_options = ssl_options
        self._additional_routes = additional_routes
        self._base_path = base_path
        self._tests_per_day = tests_per_day
        self._immediate = immediate
        self._webthings = webthings
        self._location = location
        self._network_type = network_type
        self._connection_type = connection_type
        self._device_id = device_id
        self._config = config

    def _call_runners(self):
        for r in self._runners.values():
            _logger.info("Running test: %s", r.title)
            try:
                r.start_test()
            except Exception as exc:
                _logger.error("Failed to run test %s: %s", r.title, str(exc))

    def _call_exporters(self, test_name="", data="", timestamp=None):
        for e in self._exporters.values():
            _logger.info("Running exporter %s for test %s", e.name, test_name)
            try:
                e.push(test_name, data, timestamp)
            except Exception as exc:
                _logger.error("Failed to run exporter %s: %s", e.name,
                              str(exc))

    def _load_runners(self):
        trigger = RandomTrigger(tests_per_day=self._tests_per_day,
                                immediate=self._immediate)

        # Load test runners
        for entry_point in pkg_resources.iter_entry_points("murakami.runners"):
            logging.debug("Loading test runner %s", entry_point.name)
            if "tests" not in self._config:
                self._config["tests"] = {}
            if entry_point.name not in self._config["tests"]:
                self._config["tests"][entry_point.name] = {"enabled": False}
            self._runners[entry_point.name] = entry_point.load()(
                config=self._config["tests"][entry_point.name],
                data_cb=self._call_exporters,
                location=self._location,
                network_type=self._network_type,
                connection_type=self._connection_type,
                device_id=self._device_id,
            )

        # Start webthings server if enabled
        if self._webthings:
            self._server = WebThingServer(
                SingleThing(MurakamiThing(self._runners.values())),
                port=self._port,
                hostname=self._hostname,
                ssl_options=self._ssl_options,
                additional_routes=self._additional_routes,
                base_path=self._base_path,
            )

        # Start test scheduler if enabled
        if self._tests_per_day > 0:
            self._scheduler = TornadoScheduler()
            self._scheduler.add_job(self._call_runners,
                                    id="runners",
                                    name="Test Runners",
                                    trigger=trigger)

    def _load_exporters(self):
        self._exporters = {}
        # Check if exporters are enabled and load them.
        if "exporters" in self._config:
            exporters = pkg_resources.get_entry_map("murakami",
                                                    group="murakami.exporters")
            for name, entry in self._config["exporters"].items():
                logging.debug("Loading exporter %s", name)
                enabled = True
                if "enabled" in entry:
                    enabled = utils.is_enabled(entry["enabled"])
                if enabled:
                    if "type" in entry:
                        if entry["type"] in exporters:
                            self._exporters[name] = exporters[
                                entry["type"]].load()(
                                    name=name,
                                    location=self._location,
                                    network_type=self._network_type,
                                    connection_type=self._connection_type,
                                    config=entry,
                                )
                        else:
                            logging.error(
                                "No available exporter type %s, skipping.",
                                entry["type"],
                            )
                    else:
                        logging.error(
                            "No type defined for exporter %s, skipping.", name)
                else:
                    logging.debug("Exporter %s disabled, skipping.", name)

    def start(self):
        """Start MurakamiServer, including WebThingServer if directed."""
        _logger.info("Starting Murakami services.")
        self._load_runners()
        self._load_exporters()

        if self._scheduler is not None:
            _logger.info("Starting the job scheduler.")
            self._scheduler.start()
        if self._server is not None:
            _logger.info("Starting the WebThing server.")
            self._server.start()
        if self._scheduler is not None and self._server is None:
            IOLoop.current().start()

    def stop(self):
        """Stop MurakamiServer."""
        _logger.info("Stopping Murakami services.")

        IOLoop.current().stop()

        if self._scheduler is not None:
            _logger.info("Stopping the job scheduler.")
            self._scheduler.shutdown()
        if self._server is not None:
            _logger.info("Stopping the WebThing server.")
            self._server.stop()

        _logger.info("Cleaning up test runners.")

        for r in self._runners:
            self._runners[r].stop_test()
            self._runners[r].teardown()

    @gen.coroutine
    def reload(self, signum, frame, **kwargs):
        """Reload MurakamiServer, to be called as an event handler."""
        local_args = dict(locals())
        _logger.info("Reloading Murakami services...")
        for key, _ in local_args.items():
            if key in kwargs:
                setattr(self, key, kwargs[key])
        yield IOLoop.current().add_callback_from_signal(self.stop)
        self.start()
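A plausible way to drive this class, sketched under assumptions: the import path, the config dict shape, and the SIGHUP wiring are inferred from the signatures above rather than taken from the murakami package.

import signal

from murakami.server import MurakamiServer  # assumed import path

server = MurakamiServer(
    port=8080,
    tests_per_day=4,
    config={"tests": {}, "exporters": {}},  # assumed minimal config shape
)

# reload on SIGHUP (POSIX only), matching the reload(signum, frame) signature
signal.signal(signal.SIGHUP, server.reload)

try:
    server.start()
except (KeyboardInterrupt, SystemExit):
    server.stop()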
Exemplo n.º 23
0
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date, time, timedelta
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.triggers.interval import IntervalTrigger
import tornado.ioloop

from mylogger.mylogger import logger
import logging


def testfunc(a, b, c):
    logger.info('dongzhuoyao DOING: %s' % locals())


if __name__ == '__main__':
    sched = TornadoScheduler()
    sched.add_job(
        func=testfunc,
        args=(1, 2, 3),
        trigger=IntervalTrigger(
            start_date=datetime.combine(date.today(), time.max),  # first run at the end of today (23:59:59)
            end_date=date.today() + timedelta(days=1000),
            days=1,
        ),
        name='testfunc')

    sched.start()
    try:
        tornado.ioloop.IOLoop.current().start()
    except (KeyboardInterrupt, SystemExit):
        pass
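The trigger above is effectively a daily job anchored at the end of today; APScheduler's CronTrigger states that intent more directly. A sketch of the equivalent registration, reusing sched and testfunc from the example above:

from apscheduler.triggers.cron import CronTrigger

# fires every day at 23:59, with no explicit end date
sched.add_job(
    func=testfunc,
    args=(1, 2, 3),
    trigger=CronTrigger(hour=23, minute=59),
    name='testfunc')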
Exemplo n.º 24
0
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = None
        self.pid = os.getpid()
        self.showlist = []

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = (
            "1x02",
            "s01e02",
            "S01E02",
            "01x02",
            "S01 E02",
        )
        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [
            language for language in os.listdir(sickrage.LOCALE_DIR)
            if '_' in language
        ]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.event_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id='sickrage-app',
            client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.move_file(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(
                                   hours=self.config.version_updater_freq, ),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(self.tz_updater.run,
                               IntervalTrigger(days=1, ),
                               name=self.tz_updater.name,
                               id=self.tz_updater.name)

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add rss cache updater job
        self.scheduler.add_job(self.rsscache_updater.run,
                               IntervalTrigger(minutes=15, ),
                               name=self.rsscache_updater.name,
                               id=self.rsscache_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # add namecache update job
        self.scheduler.add_job(self.name_cache.build_all,
                               IntervalTrigger(days=1, ),
                               name=self.name_cache.name,
                               id=self.name_cache.name)

        # start scheduler service
        self.scheduler.start()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if not sickrage.app.no_launch and sickrage.app.config.launch_browser:
            self.event_queue.fire_event(lambda: launch_browser(
                ('http', 'https')[sickrage.app.config.enable_https],
                sickrage.app.config.web_host,
                sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # shutdown event queue
            if self.event_queue:
                self.log.debug("Shutting down event queue")
                self.event_queue.shutdown()
                del self.event_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug(
                        "Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        if self.io_loop:
            self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.save_to_db()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist and quicksearch cache with shows and episodes from the database
        """

        self.quicksearch_cache.load()

        for dbData in self.main_db.all('tv_shows'):
            show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))

            try:
                self.log.debug("Loading data for show: [{}]".format(show.name))
                self.showlist.append(show)
                self.quicksearch_cache.add_show(show.indexerid)
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" %
                               (show.location, str(e)))
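The frequency clamps above only take effect at startup; if a frequency setting changes while the app is running, the corresponding job has to be rescheduled. A sketch using APScheduler's reschedule_job, where the job id and the minimum are illustrative stand-ins for the name/id convention used in these examples:

from apscheduler.triggers.interval import IntervalTrigger


def set_daily_searcher_freq(scheduler, new_freq, min_freq=10):
    # enforce the same minimum the startup checks enforce
    new_freq = max(new_freq, min_freq)
    # 'daily_searcher' is a hypothetical job id; jobs above are keyed by task name
    scheduler.reschedule_job('daily_searcher', trigger=IntervalTrigger(minutes=new_freq))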
Exemplo n.º 25
0
class Core(object):
    def __init__(self):
        self.started = False
        self.loading_shows = False
        self.daemon = None
        self.io_loop = None
        self.pid = os.getpid()

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.shows = {}

        self.private_key = None
        self.public_key = None

        self.main_db = None
        self.cache_db = None

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.disable_updates = None
        self.web_port = None
        self.web_host = None
        self.web_root = None
        self.developer = None
        self.db_type = None
        self.db_prefix = None
        self.db_host = None
        self.db_port = None
        self.db_username = None
        self.db_password = None
        self.debug = None
        self.newest_version_string = None

        self.oidc_client_id = 'sickrage-app'
        self.oidc_client_secret = '5d4710b2-ca70-4d39-b5a3-0705e2c5e703'

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = (
            "1x02",
            "s01e02",
            "S01E02",
            "01x02",
            "S01 E02",
        )
        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [
            language for language in os.listdir(sickrage.LOCALE_DIR)
            if '_' in language
        ]
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.notifier_providers = {}
        self.metadata_providers = {}
        self.search_providers = {}
        self.adba_connection = None
        self.log = None
        self.config = None
        self.alerts = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.failed_snatch_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None
        self.announcements = None
        self.api = None

    def start(self):
        self.started = True

        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.api = API()
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host,
                              self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host,
                                self.db_port, self.db_username,
                                self.db_password)
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()
        self.announcements = Announcements()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id=self.oidc_client_id,
            client_secret=self.oidc_client_secret)

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restore_app_data(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" %
                          ("FAILED", "SUCCESSFUL")[success])
            if success:
                self.main_db = MainDB(self.db_type, self.db_prefix,
                                      self.db_host, self.db_port,
                                      self.db_username, self.db_password)
                self.cache_db = CacheDB(self.db_type, self.db_prefix,
                                        self.db_host, self.db_port,
                                        self.db_username, self.db_password)
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.move_file(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # init encryption public and private keys
        encryption.initialize()

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # perform integrity check
            self.log.info("Performing integrity check on {} database".format(
                db.name))
            db.integrity_check()

            # migrate database
            self.log.info("Performing migrations on {} database".format(
                db.name))
            db.migrate()

            # sync database repo
            self.log.info("Performing sync on {} database".format(db.name))
            db.sync_db_repo()

            # cleanup
            self.log.info("Performing cleanup on {} database".format(db.name))
            db.cleanup()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # load quicksearch cache
        self.quicksearch_cache.load()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(
                                   hours=self.config.version_updater_freq,
                                   timezone='utc'),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(self.tz_updater.run,
                               IntervalTrigger(days=1, timezone='utc'),
                               name=self.tz_updater.name,
                               id=self.tz_updater.name)

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour),
                                   timezone='utc'),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add rss cache updater job
        self.scheduler.add_job(self.rsscache_updater.run,
                               IntervalTrigger(minutes=15, timezone='utc'),
                               name=self.rsscache_updater.name,
                               id=self.rsscache_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30),
                            timezone='utc'),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(self.auto_postprocessor.run,
                               IntervalTrigger(
                                   minutes=self.config.autopostprocessor_freq,
                                   timezone='utc'),
                               name=self.auto_postprocessor.name,
                               id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(self.proper_searcher.run,
                               IntervalTrigger(minutes={
                                   '15m': 15,
                                   '45m': 45,
                                   '90m': 90,
                                   '4h': 4 * 60,
                                   'daily': 24 * 60
                               }[self.config.proper_searcher_interval],
                                               timezone='utc'),
                               name=self.proper_searcher.name,
                               id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1, timezone='utc'),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(self.subtitle_searcher.run,
                               IntervalTrigger(
                                   hours=self.config.subtitle_searcher_freq,
                                   timezone='utc'),
                               name=self.subtitle_searcher.name,
                               id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime,
                            timezone='utc'),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # add announcements job
        self.scheduler.add_job(self.announcements.run,
                               IntervalTrigger(minutes=15, timezone='utc'),
                               name=self.announcements.name,
                               id=self.announcements.name)

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # fire off startup events
        self.io_loop.add_callback(self.load_shows)
        self.io_loop.add_callback(self.version_updater.run)
        self.io_loop.add_callback(self.tz_updater.run)
        self.io_loop.add_callback(self.announcements.run)

        # start scheduler service
        self.scheduler.start()

        # start web server
        self.wserver.start()

        # launch browser window
        if all(
            [not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.io_loop.add_callback(
                functools.partial(launch_browser,
                                  ('http',
                                   'https')[sickrage.app.config.enable_https],
                                  sickrage.app.config.web_host,
                                  sickrage.app.config.web_port))

        def started():
            threading.currentThread().setName('CORE')
            self.log.info("SiCKRAGE :: STARTED")
            self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(
                sickrage.version()))
            self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(
                self.config.config_version))
            self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(
                self.main_db.version))
            self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(
                self.db_type))
            self.log.info("SiCKRAGE :: URL:[{}://{}:{}{}]".format(
                ('http', 'https')[self.config.enable_https],
                self.config.web_host, self.config.web_port,
                self.config.web_root))

        # start io_loop
        self.io_loop.add_callback(started)
        self.io_loop.start()

    def load_shows(self):
        threading.currentThread().setName('CORE')

        session = self.main_db.session()

        self.log.info('Loading initial shows list')

        self.loading_shows = True

        self.shows = {}
        for query in session.query(MainDB.TVShow).with_entities(
                MainDB.TVShow.indexer_id, MainDB.TVShow.indexer,
                MainDB.TVShow.name):
            try:
                self.log.info('Loading show {} and building caches'.format(
                    query.name))
                self.shows.update({
                    (query.indexer_id, query.indexer):
                    TVShow(query.indexer_id, query.indexer)
                })
                self.quicksearch_cache.add_show(query.indexer_id)
            except Exception as e:
                self.log.debug('There was an error loading show {}: {}'.format(
                    query.name, e))

        self.loading_shows = False

        self.log.info('Loading initial shows list finished')

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save shows
            self.log.info('Saving all shows to the database')
            for show in self.shows.values():
                show.save()

            # save settings
            self.config.save()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        if self.io_loop:
            self.io_loop.stop()
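
The example above registers every recurring task on a single TornadoScheduler, giving each job matching name/id values, then hands control to Tornado's IOLoop. A minimal, self-contained sketch of that pattern (the job name and interval are illustrative, not SiCKRAGE's actual values):

from datetime import datetime, timedelta

from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.triggers.interval import IntervalTrigger
from tornado.ioloop import IOLoop


def version_check():
    print("checking for updates...")


scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})

# name/id make the job addressable later (reschedule_job, remove_job);
# start_date delays the first run instead of firing immediately
scheduler.add_job(version_check,
                  IntervalTrigger(hours=12,
                                  start_date=datetime.now() + timedelta(minutes=4),
                                  timezone='utc'),
                  name='VERSIONUPDATER',
                  id='VERSIONUPDATER')

scheduler.start()
IOLoop.current().start()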
Exemplo n.º 26
0
import datetime
import logging

import pytz
from apscheduler.executors.tornado import TornadoExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.tornado import TornadoScheduler
from tornado.ioloop import IOLoop

# snapshotter, tick_lib and CoinbaseAPIError come from elided context
# (a Coinbase Pro client and an Arctic tick library) and are assumed
# importable in the original project
logger = logging.getLogger(__name__)

def on_tick():
    tick_logger = logging.getLogger(__name__)
    try:
        last = snapshotter.snap_last_trade('BTC-USD')
        rxt = datetime.datetime.now(pytz.utc)
        px = float(last['price'])
        qty = float(last['size'])
        last_row = [{'index': rxt, 'price': px, 'qty': qty}]
        tick_lib.write('BTC-USD', last_row, metadata={'source': 'CoinbasePro'})
        tick_logger.info("wrote latest trade to Arctic: {} @ {}".format(
            qty, px))
    except CoinbaseAPIError:
        tick_logger.info(
            "ignoring transient error from Coinbase Pro API; will retry")


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    scheduler.add_jobstore(MemoryJobStore())
    scheduler.add_executor(TornadoExecutor())

    scheduler.add_job(on_tick, 'interval', minutes=1)
    scheduler.start()

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        logger.info("starting Tornado")
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
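
The except clause above swallows Ctrl+C without stopping the scheduler. A hedged variant of the same __main__ block that shuts APScheduler down explicitly; the tick callback here is a stand-in for the Coinbase snapshot job:

from apscheduler.executors.tornado import TornadoExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.tornado import TornadoScheduler
from tornado.ioloop import IOLoop


def tick():
    print("tick")


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    scheduler.add_jobstore(MemoryJobStore())
    scheduler.add_executor(TornadoExecutor())
    scheduler.add_job(tick, 'interval', minutes=1)
    scheduler.start()

    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        # stop pending jobs instead of just falling off the end of main
        scheduler.shutdown(wait=False)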
Exemplo n.º 27
0
    def __init__(self, sqlalchemy_engine):
        self.scheduler = TornadoScheduler()
        self.scheduler.add_jobstore("sqlalchemy", engine=sqlalchemy_engine)
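
A self-contained sketch of the same idea with a concrete engine. With a persistent job store, jobs must be module-level callables (lambdas cannot be serialized), and explicit ids plus replace_existing keep restarts from raising conflicting-id errors; the SQLite URL is illustrative:

from apscheduler.schedulers.tornado import TornadoScheduler
from sqlalchemy import create_engine


def nightly_report():
    print("building report")


engine = create_engine('sqlite:///jobs.sqlite')  # any SQLAlchemy engine works

scheduler = TornadoScheduler()
scheduler.add_jobstore('sqlalchemy', engine=engine)

# persisted jobs are looked up by id across restarts
scheduler.add_job(nightly_report, 'interval', hours=24,
                  id='nightly_report', replace_existing=True)
scheduler.start()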
Exemplo n.º 28
0
import tornado.ioloop
import tornado.web
from apscheduler.executors.tornado import TornadoExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.tornado import TornadoScheduler
from pytz import utc

number = 0


def tick():
    global number
    number += 1
    print('Tick! The number is {0}'.format(number))


class MainHandler(tornado.web.RequestHandler):
    # minimal handler; the original snippet references MainHandler
    # without showing its definition
    def get(self):
        self.write("Hello, world")


def make_app():
    return tornado.web.Application([
        (r"/", MainHandler),
    ])


if __name__ == "__main__":

    scheduler = TornadoScheduler()

    jobstores = {
        'default': MemoryJobStore()
    }
    executors = {
        'default': TornadoExecutor(max_workers=5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 1
    }

    scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()

    # serve the Tornado app and enter the IOLoop so the scheduled job runs
    app = make_app()
    app.listen(8888)  # port is illustrative
    tornado.ioloop.IOLoop.current().start()
Exemplo n.º 29
0
    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: {}!".format(("FAILED", "SUCCESSFUL")[success]))
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(
                days=1,
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(
                minutes=15,
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(
                days=1,
            ),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(('http', 'https')[sickrage.app.config.enable_https],
                                                               sickrage.app.config.web_host,
                                                               sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()
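
Both Core.start variants emulate "run daily at a configured hour" with IntervalTrigger(days=1, start_date=now().replace(hour=...)). A CronTrigger states that intent directly; a rough sketch with an illustrative job name:

from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.triggers.cron import CronTrigger


def show_update():
    print("updating shows")


scheduler = TornadoScheduler()

# fires at 03:00 UTC every day, regardless of when the process started
scheduler.add_job(show_update, CronTrigger(hour=3, timezone='utc'),
                  name='SHOWUPDATER', id='SHOWUPDATER')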
Exemplo n.º 30
0
#import logging

from tornado.options import options, define


define("port", default=8082, help="server listen port", type=int)
define("dbinit", default=0, help="whether to init the db, 0:No; 1:Yes", type=int)

APP_USER_COOKIE_NAME = "_user__"
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'doc', 'docx', 'pdf', 'txt', 'xlsx'}

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
log_file_path = os.path.join(BASE_DIR, 'logs/auth.log')
#err_file_path = os.path.join(BASE_DIR, 'logs/err.log')
sched = TornadoScheduler()


# logger.remove()
logger.add(sys.stderr, colorize=True, format="<green>{time}</green> <level>{message}</level>", filter="auth")
logger.add(log_file_path, rotation="1 days", colorize=True, format="<green>{time}</green> <level>{message}</level>", encoding='utf-8', filter="auth", level="DEBUG")
#logger.add(err_file_path, rotation="1 days", colorize=True, format="<green>{time}</green> <level>{message}</level>", encoding='utf-8', filter="auth", level="INFO")


def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

class BaseHandler(tornado.web.RequestHandler):
    async def prepare(self):
        # get_current_user cannot be a coroutine, so set
        # self.current_user in prepare instead.
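
A self-contained sketch of how such a prepare() typically continues, following Tornado's documented pattern; user_from_cookie is a stand-in for the project's real user lookup:

import tornado.web

APP_USER_COOKIE_NAME = "_user__"


async def user_from_cookie(user_id):
    # stand-in lookup; a real app would consult its user store here
    return {"id": user_id.decode()}


class BaseHandler(tornado.web.RequestHandler):
    async def prepare(self):
        # an async prepare lets current_user come from an awaited lookup
        user_id = self.get_secure_cookie(APP_USER_COOKIE_NAME)
        if user_id:
            self.current_user = await user_from_cookie(user_id)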
Exemplo n.º 31
0

executors = {
    'default': ThreadPoolExecutor(1),
    'processpool': ProcessPoolExecutor(1)
}

job_defaults = {
    'coalesce': False,
    'max_instances': 1,
    'misfire_grace_time': 60 * 60 * 20,
}

db_info = settings.db

jobstores = {
    "default":
    SQLAlchemyJobStore(url="mysql://%s:%s@%s/%s" %
                       (db_info["user"], db_info["password"], db_info["host"],
                        db_info["db_name"]))
}

scheduler = TornadoScheduler(jobstores=jobstores,
                             executors=executors,
                             job_defaults=job_defaults)
# scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
# print "scheduler starting"
# scheduler.start()
# print "scheduler started"
# scheduler.add_job(print_datetime, 'interval', id="xh-job-1", seconds=2, replace_existing=True)
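
The job_defaults above apply to every job that does not override them; the 20-hour misfire_grace_time lets a job that missed its window (for example while the process was down) still run once the scheduler is back. A small sketch of defaults versus a per-job override:

from apscheduler.schedulers.tornado import TornadoScheduler


def nightly_sync():
    print("sync")


scheduler = TornadoScheduler(job_defaults={
    'coalesce': False,
    'max_instances': 1,
    'misfire_grace_time': 60 * 60 * 20,
})

# this job tolerates only a one-hour late start, overriding the default
scheduler.add_job(nightly_sync, 'cron', hour=3,
                  misfire_grace_time=3600, id='nightly_sync')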
Exemplo n.º 32
0
# jobstores = {
#     "default": SQLAlchemyJobStore(DbStore)
# }
executors = {
    "default": ThreadPoolExecutor(10),  # thread pool
    #     "processpool": ProcessPoolExecutor(5)  # process pool
}
job_defaults = {
    "coalesce": True,  # merge missed runs into a single run
    "max_instances": 1  # maximum number of concurrently running instances
}

# instantiate the TornadoScheduler task scheduler
scheduler = TornadoScheduler(
    daemonic=False,  # do not run the scheduler thread as a daemon
    #     jobstores=jobstores,  # defaults to the in-memory job store
    executors=executors,
    job_defaults=job_defaults)

# make the scheduler and email-id map globally available
AutoReportGlobals.Scheduler = scheduler
AutoReportGlobals.EmailIds = {}


def onListener(event):
    if event.exception:
        logger.warning("The job crashed :( " + str(event))


#         print("The job crashed :(", event.alias, event.code, event.exception,
# event.job_id, event.jobstore, event.retval, event.scheduled_run_time)
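
onListener above is only useful once it is registered with the scheduler; a self-contained sketch of hooking a listener to both error and success events:

import logging

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from apscheduler.schedulers.tornado import TornadoScheduler

logger = logging.getLogger(__name__)


def on_job_event(event):
    if event.exception:
        logger.warning("The job crashed :( %s", event)
    else:
        logger.info("job %s finished normally", event.job_id)


scheduler = TornadoScheduler()
scheduler.add_listener(on_job_event, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED)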
Exemplo n.º 33
0
class SchedulerManager:
    def __init__(self, config=None, syncobj=None):
        if config is None:
            config = Config()
        self.config = config
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        self.scheduler = TornadoScheduler(executors=executors)
        self.task_queue = Queue()
        self.poll_task_queue_callback = None
        self.pool_task_queue_interval = 10
        self.ioloop = IOLoop.instance()
        self.poll_task_queue_callback = PeriodicCallback(
            self.poll_task_queue, self.pool_task_queue_interval * 1000)
        self.clear_finished_jobs_callback = PeriodicCallback(
            self.clear_finished_jobs, 60 * 1000)
        self.reset_timeout_job_callback = PeriodicCallback(
            self.reset_timeout_job, 10 * 1000)

        self.sync_obj = syncobj
        if syncobj is not None:
            self.sync_obj.set_on_remove_schedule_job(
                self.on_cluster_remove_scheduling_job)
            self.sync_obj.set_on_add_schedule_job(
                self.on_cluster_add_scheduling_job)

    def init(self):
        session = Session()

        # move completed jobs into history
        for job in session.query(SpiderExecutionQueue).filter(
                SpiderExecutionQueue.status.in_((2, 3))):
            historical_job = HistoricalJob()
            historical_job.id = job.id
            historical_job.spider_id = job.spider_id
            historical_job.project_name = job.project_name
            historical_job.spider_name = job.spider_name
            historical_job.fire_time = job.fire_time
            historical_job.start_time = job.start_time
            historical_job.complete_time = job.update_time
            historical_job.status = job.status
            session.delete(job)
            session.add(historical_job)
        session.commit()

        # init triggers
        triggers = session.query(Trigger)
        for trigger in triggers:
            try:
                self.add_job(trigger.id, trigger.cron_pattern)
            except InvalidCronExpression:
                logger.warning('Trigger %d,%s cannot be added ' %
                               (trigger.id, trigger.cron_pattern))
        session.close()

        self.scheduler.start()

        self.poll_task_queue_callback.start()
        self.clear_finished_jobs_callback.start()
        self.reset_timeout_job_callback.start()

    def poll_task_queue(self):
        if self.task_queue.empty():
            with session_scope() as session:
                tasks_to_run = session.query(SpiderExecutionQueue).filter_by(
                    status=0).order_by(SpiderExecutionQueue.update_time).slice(
                        0, 10)
                for task in tasks_to_run:
                    self.task_queue.put(task)

    def build_cron_trigger(self, cron):
        cron_parts = cron.split(' ')
        if len(cron_parts) != 5:
            raise InvalidCronExpression()

        try:
            crontrigger = CronTrigger(
                minute=cron_parts[0],
                hour=cron_parts[1],
                day=cron_parts[2],
                month=cron_parts[3],
                day_of_week=cron_parts[4],
            )
            return crontrigger
        except ValueError:
            raise InvalidCronExpression()

    def add_job(self, trigger_id, cron):
        logger.debug('adding trigger %s %s' % (trigger_id, cron))
        crontrigger = self.build_cron_trigger(cron)

        job = self.scheduler.add_job(func=self.trigger_fired,
                                     trigger=crontrigger,
                                     kwargs={'trigger_id': trigger_id},
                                     id=str(trigger_id),
                                     replace_existing=True)
        if self.sync_obj:
            self.ioloop.call_later(0, self.sync_obj.add_schedule_job,
                                   trigger_id)
            #self.sync_obj.add_schedule_job(trigger_id)

    def on_cluster_remove_scheduling_job(self, job_id):
        logger.debug('on_cluster_remove_scheduling_job')
        if self.scheduler.get_job(job_id):
            self.scheduler.remove_job(job_id)

    def on_cluster_add_scheduling_job(self, trigger_id):
        logger.debug('on_cluster_add_scheduling_job')
        with session_scope() as session:
            trigger = session.query(Trigger).filter_by(id=trigger_id).first()
            if trigger is None:
                return
            crontrigger = self.build_cron_trigger(trigger.cron_pattern)
            job = self.scheduler.add_job(func=self.trigger_fired,
                                         trigger=crontrigger,
                                         kwargs={'trigger_id': trigger_id},
                                         id=str(trigger_id),
                                         replace_existing=True)

    def trigger_fired(self, trigger_id):
        with session_scope() as session:
            trigger = session.query(Trigger).filter_by(id=trigger_id).first()
            if not trigger:
                logger.error('Trigger %s not found.' % trigger_id)
                return
            spider = session.query(Spider).filter_by(
                id=trigger.spider_id).first()
            project = session.query(Project).filter_by(
                id=spider.project_id).first()
            executing = session.query(SpiderExecutionQueue).filter(
                SpiderExecutionQueue.spider_id == spider.id,
                SpiderExecutionQueue.status.in_([0, 1]))
            concurrency_setting = session.query(SpiderSettings).filter_by(
                spider_id=spider.id, setting_key='concurrency').first()
            concurrency = int(
                concurrency_setting.value) if concurrency_setting else 1
            executing_slots = [
                executing_job.slot for executing_job in executing
            ]
            free_slots = [
                x for x in range(1, concurrency + 1)
                if x not in executing_slots
            ]
            if not free_slots:
                logger.warning(
                    'spider %s-%s is configured for %d concurrency, and %d in queue, skipping'
                    % (project.name, spider.name, concurrency,
                       len(executing_slots)))
                return

            executing = SpiderExecutionQueue()
            executing.id = generate_job_id()
            executing.spider_id = spider.id
            executing.project_name = project.name
            executing.spider_name = spider.name
            executing.fire_time = datetime.datetime.now()
            executing.update_time = datetime.datetime.now()
            executing.slot = free_slots[0]
            session.add(executing)
            try:
                session.commit()
            except Exception as e:  # includes IntegrityError
                logger.warning(e)
            session.close()
            return

    def add_schedule(self, project, spider, cron):
        with session_scope() as session:
            triggers = session.query(Trigger).filter(
                Trigger.spider_id == spider.id)
            found = False
            for trigger in triggers:
                if trigger.cron_pattern == cron:
                    found = True
                    break

            if not found:
                # create a cron_trigger for just validating
                cron_trigger = self.build_cron_trigger(cron)
                trigger = Trigger()
                trigger.spider_id = spider.id
                trigger.cron_pattern = cron
                session.add(trigger)
                session.commit()
                self.add_job(trigger.id, cron)

    def add_task(self, project_name, spider_name):
        session = Session()
        project = session.query(Project).filter(
            Project.name == project_name).first()
        spider = session.query(Spider).filter(
            Spider.name == spider_name,
            Spider.project_id == project.id).first()

        try:
            existing = list(
                session.query(SpiderExecutionQueue).filter(
                    SpiderExecutionQueue.spider_id == spider.id,
                    SpiderExecutionQueue.status.in_([0, 1])))
            if existing:
                logger.warning('job %s_%s is running, ignoring schedule' %
                               (project.name, spider.name))
                raise JobRunning(existing[0].id)
            executing = SpiderExecutionQueue()
            jobid = generate_job_id()
            executing.id = jobid
            executing.spider_id = spider.id
            executing.project_name = project.name
            executing.spider_name = spider.name
            executing.fire_time = datetime.datetime.now()
            executing.update_time = datetime.datetime.now()
            session.add(executing)
            session.commit()
            session.refresh(executing)
            return executing
        finally:
            session.close()

    def on_node_expired(self, node_id):
        session = Session()
        for job in session.query(SpiderExecutionQueue).filter(
                SpiderExecutionQueue.node_id == node_id,
                SpiderExecutionQueue.status == 1):
            job.status = 0
            job.update_time = datetime.datetime.now()
            job.start_time = None
            job.pid = None
            job.node_id = None
            session.add(job)
        session.commit()
        session.close()

    def jobs(self):
        session = Session()
        pending = list(
            session.query(SpiderExecutionQueue).filter(
                SpiderExecutionQueue.status == 0))
        running = list(
            session.query(SpiderExecutionQueue).filter(
                SpiderExecutionQueue.status == 1))
        finished = list(session.query(HistoricalJob).slice(0, 100))
        session.close()
        return pending, running, finished

    def job_start(self, jobid, pid):
        with session_scope() as session:
            job = session.query(SpiderExecutionQueue).filter_by(
                id=jobid).first()
            if job.start_time is None:
                job.start_time = datetime.datetime.now()
            job.update_time = datetime.datetime.now()
            if job.pid is None and pid:
                job.pid = pid
            session.add(job)
            session.commit()
            session.close()

    def get_next_task(self, node_id):
        if not self.task_queue.empty():
            session = Session()
            try:
                next_task = self.task_queue.get_nowait()
            except Empty:
                return None
            next_task = session.query(SpiderExecutionQueue).filter(
                SpiderExecutionQueue.id == next_task.id,
                SpiderExecutionQueue.status == 0).first()
            if not next_task:
                return None
            next_task.start_time = datetime.datetime.now()
            next_task.update_time = datetime.datetime.now()
            next_task.node_id = node_id
            next_task.status = 1
            session.add(next_task)
            session.commit()
            session.refresh(next_task)
            session.close()
            return next_task
        return None

    def has_task(self):
        return not self.task_queue.empty()

    def jobs_running(self, node_id, job_ids):
        '''
        :param node_id:
        :param job_ids:
        :return: (job_id) to kill
        '''
        with session_scope() as session:
            for job_id in job_ids:
                job = session.query(SpiderExecutionQueue).filter(
                    SpiderExecutionQueue.id == job_id).first()

                if job:
                    if job.node_id is None:
                        job.node_id = node_id
                    if job.node_id != node_id or \
                        job.status != 1:
                        yield job.id
                    else:
                        job.update_time = datetime.datetime.now()
                        session.add(job)
                else:
                    yield job_id
            session.commit()

    def job_finished(self, job, log_file=None, items_file=None):
        session = Session()
        if job.status not in (2, 3):
            raise Exception('Invalid status.')
        job_status = job.status
        job = session.query(SpiderExecutionQueue).filter_by(id=job.id).first()
        job.status = job_status
        job.update_time = datetime.datetime.now()

        historical_job = HistoricalJob()
        historical_job.id = job.id
        historical_job.spider_id = job.spider_id
        historical_job.project_name = job.project_name
        historical_job.spider_name = job.spider_name
        historical_job.fire_time = job.fire_time
        historical_job.start_time = job.start_time
        historical_job.complete_time = job.update_time
        historical_job.status = job.status
        if log_file:
            historical_job.log_file = log_file
            import re
            items_crawled_pattern = re.compile(
                r"'item_scraped_count': (\d+),")
            error_log_pattern = re.compile(r"'log_count/ERROR': (\d+),")
            warning_log_pattern = re.compile(r"'log_count/WARNING': (\d+),")
            with open(log_file, 'r') as f:
                log_content = f.read()
                m = items_crawled_pattern.search(log_content)
                if m:
                    historical_job.items_count = int(m.group(1))

                m = error_log_pattern.search(log_content)
                if m and historical_job.status == JOB_STATUS_SUCCESS:
                    historical_job.status = JOB_STATUS_FAIL
                m = warning_log_pattern.search(log_content)
                if m and historical_job.status == JOB_STATUS_SUCCESS:
                    historical_job.status = JOB_STATUS_WARNING

        if items_file:
            historical_job.items_file = items_file
        session.delete(job)
        session.add(historical_job)
        session.commit()
        session.refresh(historical_job)

        # send mail
        if historical_job.status == JOB_STATUS_FAIL:
            self.try_send_job_failed_mail(historical_job)

        session.close()
        return historical_job

    def try_send_job_failed_mail(self, job):
        logger.debug('try_send_job_failed_mail')
        job_fail_send_mail = self.config.getboolean('job_fail_send_mail')
        if job_fail_send_mail:
            try:
                mail_sender = MailSender(self.config)
                subject = 'scrapydd job failed'
                to_address = self.config.get('job_fail_mail_receiver')
                content = 'bot:%s \r\nspider:%s \r\n job_id:%s \r\n' % (
                    job.spider.project.name, job.spider_name, job.id)
                mail_sender.send(to_addresses=to_address,
                                 subject=subject,
                                 content=content)

            except Exception as e:
                logger.error('Error when sending job_fail mail %s' % e)

    def clear_finished_jobs(self):
        job_history_limit_each_spider = 100
        with session_scope() as session:
            spiders = list(session.query(distinct(HistoricalJob.spider_id)))
        for row in spiders:
            spider_id = row[0]
            with session_scope() as session:
                over_limitation_jobs = list(
                    session.query(HistoricalJob)
                    .filter(HistoricalJob.spider_id == spider_id)
                    .order_by(desc(HistoricalJob.complete_time))
                    .slice(job_history_limit_each_spider, 1000)
                    .all())
            for over_limitation_job in over_limitation_jobs:
                self._remove_historical_job(over_limitation_job)

    def _clear_running_jobs(self):
        with session_scope() as session:
            jobs = list(
                session.query(SpiderExecutionQueue).filter(
                    SpiderExecutionQueue.status.in_([0, 1])))
        for job in jobs:
            self._remove_historical_job(job)

    def reset_timeout_job(self):
        with session_scope() as session:
            timeout_time = datetime.datetime.now() - datetime.timedelta(
                minutes=1)
            for job in session.query(SpiderExecutionQueue).filter(
                    SpiderExecutionQueue.status == 1):
                spider = session.query(Spider).filter_by(
                    id=job.spider_id).first()
                job_timeout_setting = session.query(SpiderSettings).filter_by(
                    spider_id=spider.id, setting_key='timeout').first()
                job_timeout = int(
                    job_timeout_setting.value) if job_timeout_setting else 3600
                logger.debug((job.update_time - job.start_time).seconds)
                if job.update_time < timeout_time:
                    # job has not refreshed as expected; the node might have died, so reset the status to PENDING
                    job.status = 0
                    job.pid = None
                    job.node_id = None
                    job.update_time = datetime.datetime.now()
                    session.add(job)
                    logger.info('Job %s timed out, resetting.' % job.id)
                elif (job.update_time - job.start_time).seconds > job_timeout:
                    # job has been running too long and should be killed

                    historical_job = HistoricalJob()
                    historical_job.id = job.id
                    historical_job.spider_id = job.spider_id
                    historical_job.project_name = job.project_name
                    historical_job.spider_name = job.spider_name
                    historical_job.fire_time = job.fire_time
                    historical_job.start_time = job.start_time
                    historical_job.complete_time = job.update_time
                    historical_job.status = 3
                    session.delete(job)
                    session.add(historical_job)
                    logger.info('Job %s ran past its timeout, killed.' % job.id)
            session.commit()

    def _remove_historical_job(self, job):
        '''
        @type job: HistoricalJob
        '''

        with session_scope() as session:
            job = session.query(HistoricalJob).filter(
                HistoricalJob.id == job.id).first()
            if job.items_file:
                try:
                    os.remove(job.items_file)
                except Exception as e:
                    logger.warning(e)

            if job.log_file:
                try:
                    os.remove(job.log_file)
                except Exception as e:
                    logger.warning(e)

            original_log_file = os.path.join('logs', job.project_name,
                                             job.spider_name,
                                             '%s.log' % job.id)
            if os.path.exists(original_log_file):
                os.remove(original_log_file)

            original_items_file = os.path.join('items', job.project_name,
                                               job.spider_name,
                                               '%s.jl' % job.id)
            if os.path.exists(original_items_file):
                os.remove(original_items_file)
            session.delete(job)

    def remove_schedule(self, project_name, spider_name, trigger_id):
        with session_scope() as session:
            project = session.query(Project).filter(
                Project.name == project_name).first()
            spider = session.query(Spider).filter(
                Spider.project_id == project.id,
                Spider.name == spider_name).first()
            trigger = session.query(Trigger).filter(
                Trigger.spider_id == spider.id,
                Trigger.id == trigger_id).first()

            session.delete(trigger)
            if self.scheduler.get_job(str(trigger_id)):
                self.scheduler.remove_job(str(trigger_id))

        if self.sync_obj:
            logger.info('remove_schedule')
            # use the trigger_id argument here: the Trigger instance is
            # detached once the session scope above has exited
            self.sync_obj.remove_schedule_job(trigger_id)
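
Nothing in this snippet shows how reset_timeout_job itself gets scheduled; presumably it is registered on the same scheduler as a short interval job, roughly like this (the one-minute interval and the placement are assumptions, not from the original):

    # e.g. during service startup, inside the same class
    self.scheduler.add_job(self.reset_timeout_job, 'interval',
                           minutes=1, id='reset_timeout_job')
    self.scheduler.start()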
Exemplo n.º 34
0
    sys.path.append(os.path.join(root_path, 'api'))
    sys.path.append(root_path)
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "alpha.settings.prod")
    django.setup()

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [%(levelname)s] %(name)s:%(lineno)d %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    logging.info((' Init Django with profile: %s ' %
                  os.environ.get('DJANGO_SETTINGS_MODULE')).center(80, '-'))


if __name__ == '__main__':
    init_project()
    scheduler = TornadoScheduler()

    # configure apscheduler's logging
    class AddedFilter(logging.Filter):
        def filter(self, record):
            return not record.msg.startswith(
                'Adding') and not record.msg.startswith('Added')

    logging.getLogger("apscheduler.scheduler").addFilter(AddedFilter())

    if platform().startswith('Windows'):
        from wind_spider import WindSpider

        # parse the Wind account from the command line
        import argparse
        parser = argparse.ArgumentParser(description='Run the spider')
Exemplo n.º 35
0
import time
import tornado.ioloop
from apscheduler.triggers.date import DateTrigger
from apscheduler.schedulers.tornado import TornadoScheduler

sched = TornadoScheduler()


def child_job():
    """创建一个执行时间为 60 s 的任务"""
    print("start")
    time.sleep(60)
    print("end")


def main_job():
    # sched.add_job(child_job, trigger=DateTrigger(), id="123")
    sched.add_job(child_job, max_instances=10, trigger=DateTrigger(), id="123")


if __name__ == "__main__":
    # run main_job every 5 s
    sched.add_job(main_job, 'interval', seconds=5)
    sched.start()
    tornado.ioloop.IOLoop.instance().start()
'''Problem reproduction:
python sche1.py
start
WARNING:apscheduler.scheduler:Execution of job "child_job (trigger: date[2020-05-07 11:15:49 CST], next run at: 2020-05-07 11:15:49 CST)" skipped: maximum number of running instances reached (1)
WARNING:apscheduler.scheduler:Execution of job "child_job (trigger: date[2020-05-07 11:15:54 CST], next run at: 2020-05-07 11:15:54 CST)" skipped: maximum number of running instances reached (1)
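
The warnings above come from the default max_instances=1: each child_job run takes 60 s, so a DateTrigger firing while a previous run is still executing gets skipped. Raising max_instances (the uncommented add_job above) allows overlap, but a 60 s job submitted every 5 s also needs more than the default 10 executor threads. A sketch of sizing both together (the numbers are assumptions):

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.tornado import TornadoScheduler

# up to twelve 60 s runs overlap when a new one starts every 5 s, so give
# both the thread pool and the per-job instance limit some headroom
sched = TornadoScheduler(
    executors={'default': ThreadPoolExecutor(max_workers=15)},
    job_defaults={'max_instances': 15, 'misfire_grace_time': 10},
)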
Exemplo n.º 36
0
class Core(object):
    def __init__(self):
        self.started = False
        self.io_loop = IOLoop.current()

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = notifiersDict()

        # generate metadata providers dict
        self.metadataProvidersDict = metadataProvidersDict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.logFile = self.srConfig.LOG_FILE
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE

        # start logger
        self.srLogger.start()

        # Check available space
        try:
            total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
            if available_space < 100:
                self.srLogger.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                sickrage.restart = False
                return
        except Exception:
            self.srLogger.error('Failed getting diskspace: %s',
                                traceback.format_exc())

        # perform database startup actions
        for db in [self.mainDB, self.cacheDB, self.failedDB]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.DEVELOPER and self.srConfig.LAST_DB_COMPACT < time.time() - 604800:  # 7 days
            self.mainDB.compact()
            self.srConfig.LAST_DB_COMPACT = int(time.time())

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.CACHE_DIR, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(
                    keepAlive=True,
                    log=lambda msg: self.srLogger.debug(
                        "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME,
                                                  self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = self.BACKLOGSEARCHER.get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'days':
                    1,
                    'start_date':
                    datetime.datetime.now().replace(
                        hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add show next episode job
        self.srScheduler.add_job(self.SHOWUPDATER.nextEpisode,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="SHOWNEXTEP",
                                 id="SHOWNEXTEP")

        # add daily search job
        self.srScheduler.add_job(self.DAILYSEARCHER.run,
                                 srIntervalTrigger(
                                     **{
                                         'minutes':
                                         self.srConfig.DAILY_SEARCHER_FREQ,
                                         'min':
                                         self.srConfig.MIN_DAILY_SEARCHER_FREQ,
                                         'start_date':
                                         datetime.datetime.now() +
                                         datetime.timedelta(minutes=4)
                                     }),
                                 name="DAILYSEARCHER",
                                 id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes':
                    self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min':
                    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
                    'start_date':
                    datetime.datetime.now() + datetime.timedelta(minutes=30)
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()
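
        # Note: the tuple-indexing idiom used above,
        # (job.pause, job.resume)[flag](), picks pause or resume by a boolean
        # flag. An equivalent, more explicit form (illustrative only):
        #
        #     job = self.srScheduler.get_job('POSTPROCESSOR')
        #     if self.srConfig.PROCESS_AUTOMATICALLY:
        #         job.resume()
        #     else:
        #         job.pause()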

        # start queues
        self.SEARCHQUEUE.start()
        self.SHOWQUEUE.start()

        # start webserver
        self.srWebServer.start()

        self.srLogger.info("SiCKRAGE :: STARTED")
        self.srLogger.info("SiCKRAGE :: VERSION:[{}]".format(
            self.VERSIONUPDATER.version))
        self.srLogger.info("SiCKRAGE :: CONFIG:[{}] [v{}]".format(
            sickrage.CONFIG_FILE, self.srConfig.CONFIG_VERSION))
        self.srLogger.info("SiCKRAGE :: URL:[{}://{}:{}/]".format(
            ('http', 'https')[self.srConfig.ENABLE_HTTPS],
            self.srConfig.WEB_HOST, self.srConfig.WEB_PORT))

        # launch browser window
        if all(
            [not sickrage.NOLAUNCH, sickrage.srCore.srConfig.LAUNCH_BROWSER]):
            threading.Thread(
                None, lambda: launch_browser(
                    ('http', 'https')[sickrage.srCore.srConfig.ENABLE_HTTPS],
                    self.srConfig.WEB_HOST, sickrage.srCore.srConfig.WEB_PORT)
            ).start()

        # start ioloop event handler
        self.io_loop.start()

    def shutdown(self):
        if self.started:
            self.started = False

            self.srLogger.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown/restart webserver
            self.srWebServer.shutdown()

            # shutdown show queue
            if self.SHOWQUEUE:
                self.srLogger.debug("Shutting down show queue")
                self.SHOWQUEUE.shutdown()

            # shutdown search queue
            if self.SEARCHQUEUE:
                self.srLogger.debug("Shutting down search queue")
                self.SEARCHQUEUE.shutdown()

            # log out of ADBA
            if sickrage.srCore.ADBA_CONNECTION:
                self.srLogger.debug("Logging out ANIDB connection")
                sickrage.srCore.ADBA_CONNECTION.logout()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.mainDB, self.cacheDB, self.failedDB]:
                db.close()

            # shutdown logging
            self.srLogger.close()

        # stop daemon process
        if not sickrage.restart and sickrage.daemon:
            sickrage.daemon.stop()

    def save_all(self):
        # write all shows
        self.srLogger.info("Saving all shows to the database")
        for SHOW in self.SHOWLIST:
            try:
                SHOW.saveToDB()
            except Exception:
                continue

        # save config
        self.srConfig.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        self.NAMECACHE.load()
        for dbData in [
                x['doc'] for x in self.mainDB.db.all('tv_shows', with_doc=True)
        ]:
            try:
                self.srLogger.debug("Loading data for show: [%s]",
                                    dbData['show_name'])
                show = TVShow(int(dbData['indexer']),
                              int(dbData['indexer_id']))
                if not sickrage.DEVELOPER:
                    show.nextEpisode()
                    self.NAMECACHE.build(show)
                self.SHOWLIST += [show]
            except Exception as e:
                self.srLogger.error("Show error in [%s]: %s" %
                                    (dbData['location'], str(e)))
Exemplo n.º 37
0
            return not record.msg.startswith(
                'Adding') and not record.msg.startswith('Added')

    logging.getLogger("apscheduler.scheduler").addFilter(AddedFilter())

    import future_spider, spot_spider
    all_crawlers = []
    for spider in [future_spider, spot_spider]:
        submodules = inspect.getmembers(spider, inspect.ismodule)
        for submodule_name, submodule in submodules:
            crawlers = inspect.getmembers(
                submodule, lambda i: inspect.isfunction(i) and i.__name__.
                startswith('crawl_'))
            all_crawlers.extend(map(lambda i: i[1], crawlers))

    scheduler = TornadoScheduler()

    logging.info(
        'Crawl interval defaults to once every 60 minutes; to crawl e.g. once per '
        'hour, append __60m to the crawler method name. Only minute (m) based '
        'intervals are supported for now; storage and overwrite logic is left to '
        'each crawler.'
    )
    for crawler in all_crawlers:
        configuration = list(
            filter(lambda i: re.match(r'(\d+)(m)', i),
                   crawler.__name__.split('__')))
        if not configuration:
            try:
                # no interval configured: run once immediately; failures
                # should not prevent the job from being scheduled below
                crawler()
            except Exception:
                pass
            scheduler.add_job(spider_wrapper(crawler),
                              'interval',
Exemplo n.º 38
0
class Core(object):
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # cpu count
        self.CPU_COUNT = cpu_count()

        # generate notifiers dict
        self.notifiersDict = AttrDict(libnotify=LibnotifyNotifier(),
                                      kodi_notifier=KODINotifier(),
                                      plex_notifier=PLEXNotifier(),
                                      emby_notifier=EMBYNotifier(),
                                      nmj_notifier=NMJNotifier(),
                                      nmjv2_notifier=NMJv2Notifier(),
                                      synoindex_notifier=synoIndexNotifier(),
                                      synology_notifier=synologyNotifier(),
                                      pytivo_notifier=pyTivoNotifier(),
                                      growl_notifier=GrowlNotifier(),
                                      prowl_notifier=ProwlNotifier(),
                                      libnotify_notifier=LibnotifyNotifier(),
                                      pushover_notifier=PushoverNotifier(),
                                      boxcar_notifier=BoxcarNotifier(),
                                      boxcar2_notifier=Boxcar2Notifier(),
                                      nma_notifier=NMA_Notifier(),
                                      pushalot_notifier=PushalotNotifier(),
                                      pushbullet_notifier=PushbulletNotifier(),
                                      freemobile_notifier=FreeMobileNotifier(),
                                      twitter_notifier=TwitterNotifier(),
                                      trakt_notifier=TraktNotifier(),
                                      email_notifier=EmailNotifier())

        # generate metadata providers dict
        self.metadataProviderDict = get_metadata_generator_dict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE
        self.srLogger.logFile = self.srConfig.LOG_FILE

        # start logger
        self.srLogger.start()

        # Check available space
        try:
            total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
            if available_space < 100:
                self.srLogger.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                sickrage.restart = False
                return
        except:
            self.srLogger.error('Failed getting diskspace: %s',
                                traceback.format_exc())

        # perform database startup actions
        for db in [MainDB, CacheDB, FailedDB]:
            # initialize the database
            db().initialize()

            # migrate the database
            db().migrate()

            # compact the main database
            db().compact()

        # load data for shows from database
        self.load_shows()

        # build name cache
        self.NAMECACHE.build()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(
                    keepAlive=True,
                    log=lambda msg: self.srLogger.debug(
                        "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME,
                                                  self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # initialize metadata_providers
        for cur_metadata_tuple in [
            (self.srConfig.METADATA_KODI, kodi),
            (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
            (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
            (self.srConfig.METADATA_PS3, ps3),
            (self.srConfig.METADATA_WDTV, wdtv),
            (self.srConfig.METADATA_TIVO, tivo),
            (self.srConfig.METADATA_MEDE8ER, mede8er)
        ]:
            (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
            tmp_provider = cur_metadata_class.metadata_class()
            tmp_provider.set_config(cur_metadata_config)

            self.metadataProviderDict[tmp_provider.name] = tmp_provider

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add namecache updater job
        self.srScheduler.add_job(
            self.NAMECACHE.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.NAMECACHE_FREQ,
                    'min': self.srConfig.MIN_NAMECACHE_FREQ
                }),
            name="NAMECACHE",
            id="NAMECACHE")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours':
                    1,
                    'start_date':
                    datetime.datetime.now().replace(
                        hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.DAILY_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ
                }),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start queues
        self.SEARCHQUEUE.start()
        self.SHOWQUEUE.start()

        # start webserver
        self.srWebServer.start()

        # start ioloop event handler
        IOLoop.current().start()

    def shutdown(self):
        if self.started:
            self.started = False

            self.srLogger.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown/restart webserver
            self.srWebServer.shutdown()

            # shutdown scheduler
            self.srLogger.info("Shutting down scheduler")
            self.srScheduler.shutdown()

            # shutdown show queue
            if self.SHOWQUEUE:
                self.srLogger.info("Shutting down show queue")
                self.SHOWQUEUE.shutdown()

            # shutdown search queue
            if self.SEARCHQUEUE:
                self.srLogger.info("Shutting down search queue")
                self.SEARCHQUEUE.shutdown()

            # log out of ADBA
            if sickrage.srCore.ADBA_CONNECTION:
                self.srLogger.info("Logging out ANIDB connection")
                sickrage.srCore.ADBA_CONNECTION.logout()

            # save all show and config settings
            self.save_all()

            # shutdown logging
            self.srLogger.shutdown()

        # delete pid file
        if sickrage.DAEMONIZE:
            sickrage.delpid(sickrage.PID_FILE)

    def save_all(self):
        # write all shows
        self.srLogger.info("Saving all shows to the database")
        for SHOW in self.SHOWLIST:
            try:
                SHOW.saveToDB()
            except Exception:
                continue

        # save config
        self.srConfig.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for dbData in [
                x['doc'] for x in MainDB().db.all('tv_shows', with_doc=True)
        ]:
            try:
                self.srLogger.debug("Loading data for show: [%s]",
                                    dbData['show_name'])
                show = TVShow(int(dbData['indexer']),
                              int(dbData['indexer_id']))
                show.nextEpisode()
                self.SHOWLIST += [show]
            except Exception as e:
                self.srLogger.error("Show error in [%s]: %s" %
                                    (dbData['location'], str(e)))
Exemplo n.º 39
0
from apscheduler.schedulers.tornado import TornadoScheduler
from tornado import websocket, gen, httpclient, ioloop, web

from alphabot import help
from alphabot import memory

DEFAULT_SCRIPT_DIR = 'default-scripts'
DEBUG_CHANNEL = os.getenv('DEBUG_CHANNEL', 'alphabot')

WEB_PORT = int(os.getenv('WEB_PORT', 8000))
WEB_PORT_SSL = int(os.getenv('WEB_PORT_SSL', 8443))

log = logging.getLogger(__name__)
log_level = logging.getLevelName(os.getenv('LOG_LEVEL', 'INFO'))
log.setLevel(log_level)
scheduler = TornadoScheduler()
scheduler.start()
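
# Starting the scheduler at import time is safe with Tornado: start() only
# registers callbacks on the IOLoop, and they fire once the application
# later calls IOLoop.start(). A hypothetical module-level job registered
# the same way (name and interval are illustrative, not from alphabot):
def _heartbeat():
    log.info('alphabot heartbeat')


scheduler.add_job(_heartbeat, 'interval', minutes=5, id='heartbeat')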


class AlphaBotException(Exception):
    """Top of hierarchy for all alphabot failures."""


class CoreException(AlphaBotException):
    """Used to signify a failure in the robot's core."""


class InvalidOptions(AlphaBotException):
    """Robot failed because input options were somehow broken."""

Exemplo n.º 40
0
def tornado_scheduler(io_loop):
    scheduler = TornadoScheduler(io_loop=io_loop)
    scheduler.start(paused=True)
    yield scheduler
    scheduler.shutdown(False)
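
This generator has the shape of a pytest yield fixture; the @pytest.fixture decorator and the io_loop fixture it depends on are presumably cropped out of the snippet. A sketch of the full form with a test using it (the test itself is an assumption):

import pytest
from apscheduler.schedulers.tornado import TornadoScheduler

@pytest.fixture
def tornado_scheduler(io_loop):
    scheduler = TornadoScheduler(io_loop=io_loop)
    scheduler.start(paused=True)  # started but paused, so no jobs fire
    yield scheduler
    scheduler.shutdown(False)  # wait=False: don't block on running jobs

def test_get_job(tornado_scheduler):
    job = tornado_scheduler.add_job(print, 'interval', seconds=1)
    assert tornado_scheduler.get_job(job.id) == job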
Exemplo n.º 41
0
        # get all routes which belong to user or profile
        # TODO - include employees
        self.write( {"lol":"lmao"} )

@Route(r"/signals")
class SimpleHandler6(tornado.web.RequestHandler):
    def get(self):
        # TODO - get all routes which belong to
        self.write( {"lol":"lmao"} )

app = tornado.web.Application(Route.routes() + [
 (r'/send_message', SendMessageHandler)
] + sockjs.tornado.SockJSRouter(MessageHandler, '/sockjs').urls)

if __name__ == "__main__":
    app.listen(8988)
    #app.listen(8000)
    #app.listen(5000)
    #tornado.ioloop.IOLoop.current().add_callback(print_changes)
    tornado.ioloop.IOLoop.current().add_callback(company_name_to_domain_changes)
    tornado.ioloop.IOLoop.current().add_callback(trigger_changes)

    scheduler = TornadoScheduler()
    scheduler.add_job(AsyncCompanyNameResearch().start, 'interval', seconds=1)
    scheduler.add_job(AsyncCompanyResearch().start_company_info_research, 'interval', seconds=1)
    scheduler.add_job(AsyncCompanyResearch().start_employee_research, 'interval', seconds=1)
    scheduler.add_job(AsyncCompanyResearch().start_email_pattern_research, 'interval', seconds=1)
    scheduler.start()

    tornado.ioloop.IOLoop.current().start()
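
All four research jobs fire every second; if a run is still executing at the next fire time, APScheduler's default max_instances=1 skips it with a warning (the behaviour reproduced in Exemplo n.º 35 above). A sketch making that policy explicit, with coalescing so a backlog of missed runs collapses into one:

    scheduler.add_job(AsyncCompanyNameResearch().start, 'interval', seconds=1,
                      max_instances=1,  # skip a run while one is in flight
                      coalesce=True)    # collapse piled-up missed runs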
Exemplo n.º 42
0
    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id='sickrage-app',
            client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.move_file(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history',
                                            'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(
                                   hours=self.config.version_updater_freq, ),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(self.tz_updater.run,
                               IntervalTrigger(days=1, ),
                               name=self.tz_updater.name,
                               id=self.tz_updater.name)

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add rss cache updater job
        self.scheduler.add_job(self.rsscache_updater.run,
                               IntervalTrigger(minutes=15, ),
                               name=self.rsscache_updater.name,
                               id=self.rsscache_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # add namecache update job
        self.scheduler.add_job(self.name_cache.build_all,
                               IntervalTrigger(days=1, ),
                               name=self.name_cache.name,
                               id=self.name_cache.name)

        # start scheduler service
        self.scheduler.start()

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all(
            [not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(
                ('http', 'https')[sickrage.app.config.enable_https], sickrage.
                app.config.web_host, sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()
Exemplo n.º 43
0
def main():
    # Running APScheduler
    aps = TornadoScheduler()
    aps.add_jobstore('mongodb', collection='example_jobs')
    aps.remove_all_jobs()
    aps.add_job(tick, 'interval', seconds=3)
    aps.add_job(tick, 'interval', seconds=3)
    aps.add_job(tick, 'interval', seconds=3)
    aps.start()
    # Running server
    app = TornadoApplication()
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
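
Because the MongoDB job store is persistent, jobs outlive the process, which is why remove_all_jobs() runs first: without it, every restart would stack three more copies of tick on top of the old ones. An alternative sketch using a stable job id instead of blanket removal (the id is an assumption):

    aps.add_job(tick, 'interval', seconds=3, id='tick',
                replace_existing=True)  # update the stored job rather than duplicate it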
Exemplo n.º 44
0
from tornado.ioloop import IOLoop
from tornado.web import Application, StaticFileHandler
from tornado.httpserver import HTTPServer
from cfg import service
from controller.action import *
from controller.open_action import *
from controller.pan_manage_action import *
from controller.user_action import *
from controller.pro_action import ProductHandler
from controller.middle_ware import get_middleware
from controller.async_action import AsyncHandler
from controller.main_action import MainHandler
from controller.wx.wxget import WXAppGet
from controller.wx.wxput import WXAppPut
from utils import log as logger
from apscheduler.schedulers.tornado import TornadoScheduler
scheduler = TornadoScheduler()
scheduler.start()


guest_user = None
context = dict(guest=None)


def update_sys_cfg(release=True):
    try:
        open_service.sync_cfg()
        open_service.sync_tags()
        context['guest'] = open_service.guest_user()
    except Exception as e:
        print("update_sys_cfg err:", e)
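
The snippet is cut off before update_sys_cfg is registered; presumably it is added to the module-level scheduler roughly like this (the interval is an assumption):

    scheduler.add_job(update_sys_cfg, 'interval', minutes=10, id='update_sys_cfg')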
Exemplo n.º 45
0
def main():

    application = tornado.web.Application([
        (r'/', IndexHandler),
        (r'/dash', DashHandler),
        (r'/test', TestHandler),
        (r'/google', AnalyticsHandler),
        (r'/assets/(.*)', tornado.web.StaticFileHandler, {'path': './assets'},),
        (r'/ws/', WebSocketHandler)
    ])

    parse_command_line()
    application.listen(options.port)

    sched = TornadoScheduler(daemon=True)
    atexit.register(lambda: sched.shutdown())
    sched.add_job(social_media_fetcher.instagram_counts, 'cron', minute="*/1")
    sched.add_job(social_media_fetcher.twitter_counts, 'cron', minute="*/1")
    sched.add_job(social_media_fetcher.pinterest_counts, 'cron', minute="*/5")
    sched.add_job(social_media_fetcher.youtube_counts, 'cron', minute="*/5")
    sched.add_job(social_media_fetcher.facebook_counts, 'cron', minute="*/1")
    # todo reinstate when keys inserted 
    # sched.add_job(social_media_fetcher.linkedin_count, 'cron', minute="*/5")
    # Google Analytics importer
    sched.add_job(analytic_fetcher.get_results, 'cron', hour="1", minute="1")
    sched.start()

    tornado.ioloop.IOLoop.instance().start()
Exemplo n.º 46
0
from apscheduler.schedulers.tornado import TornadoScheduler
from tornado import websocket, gen, httpclient, ioloop, web

from alphabot import help
from alphabot import memory

DEFAULT_SCRIPT_DIR = 'default-scripts'
DEBUG_CHANNEL = os.getenv('DEBUG_CHANNEL', 'alphabot')

WEB_PORT = int(os.getenv('WEB_PORT', 8000))
WEB_PORT_SSL = int(os.getenv('WEB_PORT_SSL', 8443))

log = logging.getLogger(__name__)
log_level = logging.getLevelName(os.getenv('LOG_LEVEL', 'INFO'))
log.setLevel(log_level)
scheduler = TornadoScheduler()
scheduler.start()
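# Aside: the module-level scheduler above is started at import time with no
# jobs; bot scripts would register their own periodic work against it later
# (a sketch, where my_periodic_task is a hypothetical callable):
#
#   scheduler.add_job(my_periodic_task, 'interval', minutes=5)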


class AlphaBotException(Exception):
    """Top of hierarchy for all alphabot failures."""


class CoreException(AlphaBotException):
    """Used to signify a failure in the robot's core."""


class InvalidOptions(AlphaBotException):
    """Robot failed because input options were somehow broken."""

Exemplo n.º 47
0
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = AttrDict(
            libnotify=LibnotifyNotifier(),
            kodi_notifier=KODINotifier(),
            plex_notifier=PLEXNotifier(),
            emby_notifier=EMBYNotifier(),
            nmj_notifier=NMJNotifier(),
            nmjv2_notifier=NMJv2Notifier(),
            synoindex_notifier=synoIndexNotifier(),
            synology_notifier=synologyNotifier(),
            pytivo_notifier=pyTivoNotifier(),
            growl_notifier=GrowlNotifier(),
            prowl_notifier=ProwlNotifier(),
            libnotify_notifier=LibnotifyNotifier(),
            pushover_notifier=PushoverNotifier(),
            boxcar_notifier=BoxcarNotifier(),
            boxcar2_notifier=Boxcar2Notifier(),
            nma_notifier=NMA_Notifier(),
            pushalot_notifier=PushalotNotifier(),
            pushbullet_notifier=PushbulletNotifier(),
            freemobile_notifier=FreeMobileNotifier(),
            twitter_notifier=TwitterNotifier(),
            trakt_notifier=TraktNotifier(),
            email_notifier=EmailNotifier()
        )

        # generate metadata providers dict
        self.metadataProviderDict = get_metadata_generator_dict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []
Exemplo n.º 48
0
    ]
)
PORT = 8888
if __name__ == "__main__":
    import logging

    logFormat = "%(asctime)s:%(levelname)s:%(funcName)s:%(message)s"
    logging.basicConfig(format=logFormat)

    #    options.options['log_file_prefix'].set('/opt/logs/my_app.log')
    # options.parse_command_line() # read the command-line to get log_file_prefix from there
    _log = logging.getLogger("tornado.application")
    _log.critical("\n--------------------------------------------------------------------------\nApplication starting")
    _log.setLevel(LOG_LEVEL)  # 50 critical, 40 error, 30 warning, 20 info, 10 debug
    _log.critical("Current log-level : %s", logging.getLevelName(_log.getEffectiveLevel()))
    # _setupsql() # Prepare sqls
    # Setup the server
    application.listen(PORT)
    _log.info("Listening on port %d" % PORT)
    _dbHelper = DBHelperSQLITE()
    _scheduler = TornadoScheduler()
    _scheduler.start()
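    # note: the scheduler is started with no jobs in this excerpt; the
    # background work below is driven by TaskRunner on the IOLoop instead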

    ioloop = IOLoop.instance()
    _bgtask = TaskRunner(ioloop)

    TaskRunner.bgsetup(_bgtask)

    _log.info("Background task is set up, starting ioloop")
    ioloop.start()
Exemplo n.º 49
0
            SRC_DIR, log_file),
        shell=True)


def crawl_guba():
    # Guba stock-forum crawler
    log_file = os.path.join(LOG_DIR, 'guba.log')
    subprocess.call(
        'cd {0}/websitespider && scrapy crawl guba --logfile={1}'.format(
            SRC_DIR, log_file),
        shell=True)


if __name__ == '__main__':
    # -c scray_wechat
    scheduler = TornadoScheduler()
    # cnmn (China Nonferrous Metals News) crawler, minutes 1-59 of every hour
    scheduler.add_job(crawl_cnmn, 'cron', minute="1-59", hour="*")
    # WeChat official-accounts crawler, hourly at minute 1
    scheduler.add_job(crawl_wechat, 'cron', minute="1", hour="*")
    # Shanghai Metals Market (SMM) API, every 30s
    scheduler.add_job(get_smm_interface, 'interval', seconds=30)
    # Wallstreetcn API, every 30s
    scheduler.add_job(get_wallstreet_interface, 'interval', seconds=30)
    # Cailian Press live feed, every minute
    scheduler.add_job(get_cailian_interface, 'cron', minute="*", hour="*")
    # Guba stock forum, hourly at minute 5
    scheduler.add_job(crawl_guba, 'cron', minute="5", hour="*")
    # Wallstreetcn news, hourly at minute 4
    scheduler.add_job(crawl_wallstreet, 'cron', minute="4", hour="*")
    # JRJ (jrj.com) finance news
Exemplo n.º 50
0
    def __init__(self):
        self.scheduler = TornadoScheduler()
Exemplo n.º 51
0
job_defaults = {'coalesce': False, 'max_instances': 3}


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello World")


def make_app():
    return tornado.web.Application([
        (r"/", MainHandler),
    ])


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    # alarm_time = datetime.now() + timedelta(seconds=10)
    scheduler.add_job(tick, 'interval', seconds=10)
    # scheduler.add_job(sensorTimerA, 'interval', seconds=19)
    print('To clear the alarms, delete the example.sqlite file.')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults,
                        timezone=utc)
    scheduler.start()
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
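Aside: the jobstores and executors dicts referenced above are defined earlier
in the original file; given the "delete the example.sqlite file" hint, a
matching definition (an assumption following the standard APScheduler pattern)
would be:

    from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
    from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor

    jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///example.sqlite')}
    executors = {'default': ThreadPoolExecutor(20),
                 'processpool': ProcessPoolExecutor(5)}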
Exemplo n.º 52
0
"""
Demonstrates how to use the Tornado compatible scheduler to schedule a job that executes on 3 second intervals.
"""

from datetime import datetime
import os

from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemplo n.º 53
0
class ScheduleServer():
    def __init__(self):
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    (r'/', IndexPageHandler),

                    # --- PROJECT ---  #
                    # List of project summary.
                    (r'/api/v0/projects', apih.ListProjectSummaryHandler),
                    # Details of a project. Args: project id
                    (r'/api/v0/project/([^/]+)/details', apih.ProjectDetailsHandler),
                    # List of details of resources. Args: project id
                    (r'/api/v0/project/([^/]+)/resource/details', apih.ProjectResourceDetailsHandler),
                    # List of language status. Args: project id 
                    (r'/api/v0/project/([^/]+)/translation/status', apih.ProjectTranslationStatusHandler),

                    # --- JOB --- #
                    # List of jobs.
                    (r'/api/v0/jobs', apih.ListJobSummaryHandler),
                    # Summary of a job. Args: job id
                    (r'/api/v0/job/([^/]+)', apih.JobSummaryHandler),
                    # Execute a job. Args: job id
                    (r'/api/v0/job/([^/]+)/exec', apih.JobExecutionHandler),
                    # Details of a job. Args: job id
                    (r'/api/v0/job/([^/]+)/details', apih.JobDetailsHandler),
                    # List of resource file path and slug. Args: job id 
                    (r'/api/v0/job/([^/]+)/resource/slugs', apih.JobResourceSlugsHandler),
                    # List of resource name (in translation platform) and slug. Args: job id
                    (r'/api/v0/job/([^/]+)/translation/slugs', apih.JobTranslationSlugsHandler),
                    # Sync status (only for ResourceUploaderJob or TranslationUploaderJob). Args: job id
                    (r'/api/v0/job/([^/]+)/sync/status', apih.JobSyncStatusHandler),
                    # Job execution status. Args: job id
                    (r'/api/v0/job/([^/]+)/exec/status', apih.JobExecStatusHandler),

                    # maybe /job/(^/]+)/log/context/3  (limit = 3) might be useful

                    # --- CONFIGURATION --- #
                    # not using now but keep for a while   
                    # (r'/api/v0/config/([^/]+)/([^/]+)', apih.ConfigurationHandler), # job id, 'key' in config file
                    # Contents of project configuration file. Args: project id
                    # (r'/api/v0/config/project/([^/]+)', apih.ProjectConfigurationHandler),
                    # Contents of job configuration file. Args: job id
                    # (r'/api/v0/config/job/([^/]+)', apih.JobConfigurationHandler),
                    # Contents of resource configuration file. Args: resource configuration file name
                    (r'/api/v0/config/resource/([^/]+)', apih.ResourceConfigurationHandler),
                    # Contents of translation configuration file. Args: translation configuration file name
                    (r'/api/v0/config/translation/([^/]+)', apih.TranslationConfigurationHandler),

                    #--- LOG (JOB EXECUTION LOG) --- #
                    # Log context. Args: log path
                    (r'/api/v0/log/([^/]+)/context', apih.LogContextHandler),

                    # --- LOCAL FILES --- #
                    # List local repositories (git repository).
                    (r'/api/v0/local/repositories', apih.ListLocalRepositoriesHandler),
                    # List branches of specified local git repository. Args: repository name
                    (r'/api/v0/local/repository/([^/]+)/branches', apih.ListLocalRepositoryBranchesHandler),
                    # List local files under specified directory. Args: repository name, relative path in the repository
                    (r'/api/v0/local/repository/([^/]+)/files/([^/]+)', apih.ListLocalRepositoryFilesHandler),

                    # --- RESOURCE REPOSITORY --- #
                    # List of repositories. Args: platform name
                    #(r'/api/v0/resource/([^/]+)/repositories', apih.ListResourceRepositoriessHandler),

                    #--- TRANSLATION REPOSITORY --- #
                    # List of projects. Args: platform name
                    (r'/api/v0/translation/([^/]+)/projects', apih.ListTranslationProjectsHandler),
                    # Project details. Args: platform name, project id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/details', apih.TranslationProjectDetailsHandler),
                    # Resource details. Args: platform name, project id, resource id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/details', apih.TranslationResourceDetailsHandler),
                    # All strings for a language. Args: platform name, project id, resource id, language code
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/strings', apih.TranslationTranslationStringsHandler),
                    # Details for a translation string. Args: platform name, project id, resource id, source string id
                    # (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/details', apih.TranslationTranslationStringDetailsHandler),
                    # Details for a source string. Args: platform name, project id, resource id, source string id (key)
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/source/([^/]+)/details', apih.TranslationSourceStringDetailsHandler)
                ],
                template_path = os.path.join(os.path.dirname(__file__), 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self._restore_jobs()
        self.scheduler.start()
        self.scheduler.print_jobs()  # print_jobs() writes to stdout and returns None

    def _restore_jobs(self):
        global job
        configs = job.get_configuration(status='active')
        total = 0
        for c in configs:
            o = SchedulerJob(c)
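            # misfire_grace_time=600 lets a restored cron job still fire up to
            # 10 minutes late instead of being skipped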
            self.scheduler.add_job(o.execute, 'cron',
                                   month=c.month, day=c.day,
                                   day_of_week=c.day_of_week,
                                   hour=c.hour, minute=c.minute,
                                   name=c.name, id=c.id,
                                   misfire_grace_time=600)
            total += 1
        logger.info("Restored '{}' jobs.".format(total))

    # @classmethod
    def start(self):
        signal.signal(signal.SIGINT, self._signal_handler)
        self.http_server.listen(settings.HTTP_PORT)
        tornado.ioloop.IOLoop.current().start()

    def _signal_handler(self, signal_type, frame):
        if signal_type == signal.SIGINT:
            logger.info('SIGINT')
        else:
            logger.warning('Unknown signal')
        self.terminate()

    # @classmethod
    def terminate(self):
        logger.info('Stopping scheduler...')
        self.scheduler.shutdown()
        tornado.ioloop.IOLoop.current().stop()
        sys.exit(0)
Exemplo n.º 54
0
class Core(object):
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = AttrDict(
            libnotify=LibnotifyNotifier(),
            kodi_notifier=KODINotifier(),
            plex_notifier=PLEXNotifier(),
            emby_notifier=EMBYNotifier(),
            nmj_notifier=NMJNotifier(),
            nmjv2_notifier=NMJv2Notifier(),
            synoindex_notifier=synoIndexNotifier(),
            synology_notifier=synologyNotifier(),
            pytivo_notifier=pyTivoNotifier(),
            growl_notifier=GrowlNotifier(),
            prowl_notifier=ProwlNotifier(),
            libnotify_notifier=LibnotifyNotifier(),
            pushover_notifier=PushoverNotifier(),
            boxcar_notifier=BoxcarNotifier(),
            boxcar2_notifier=Boxcar2Notifier(),
            nma_notifier=NMA_Notifier(),
            pushalot_notifier=PushalotNotifier(),
            pushbullet_notifier=PushbulletNotifier(),
            freemobile_notifier=FreeMobileNotifier(),
            twitter_notifier=TwitterNotifier(),
            trakt_notifier=TraktNotifier(),
            email_notifier=EmailNotifier()
        )

        # generate metadata providers dict
        self.metadataProviderDict = get_metadata_generator_dict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                                 os.path.join(sickrage.DATA_DIR, '{}.bak-{}'
                                              .format('sickrage.db',
                                                      datetime.datetime.now().strftime(
                                                          '%Y%m%d_%H%M%S'))))

            helpers.moveFile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')), os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE
        self.srLogger.logFile = self.srConfig.LOG_FILE

        # start logger
        self.srLogger.start()

        # initialize the main SB database
        main_db.MainDB().InitialSchema().upgrade()

        # initialize the cache database
        cache_db.CacheDB().InitialSchema().upgrade()

        # initialize the failed downloads database
        failed_db.FailedDB().InitialSchema().upgrade()

        # fix up any db problems
        main_db.MainDB().SanityCheck()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for dir in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, dir), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection (only when AniDB support is enabled)
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=lambda msg: self.srLogger.debug(
                    "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole',
                                                'utorrent',
                                                'transmission',
                                                'deluge',
                                                'deluged',
                                                'download_station',
                                                'rtorrent',
                                                'qbittorrent',
                                                'mlnet',
                                                'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # initialize metadata_providers
        for cur_metadata_tuple in [(self.srConfig.METADATA_KODI, kodi),
                                   (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
                                   (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
                                   (self.srConfig.METADATA_PS3, ps3),
                                   (self.srConfig.METADATA_WDTV, wdtv),
                                   (self.srConfig.METADATA_TIVO, tivo),
                                   (self.srConfig.METADATA_MEDE8ER, mede8er)]:
            (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
            tmp_provider = cur_metadata_class.metadata_class()
            tmp_provider.set_config(cur_metadata_config)

            self.metadataProviderDict[tmp_provider.name] = tmp_provider

        # add show queue job
        self.srScheduler.add_job(
            self.SHOWQUEUE.run,
            srIntervalTrigger(**{'seconds': 5}),
            name="SHOWQUEUE",
            id="SHOWQUEUE"
        )

        # add search queue job
        self.srScheduler.add_job(
            self.SEARCHQUEUE.run,
            srIntervalTrigger(**{'seconds': 5}),
            name="SEARCHQUEUE",
            id="SEARCHQUEUE"
        )

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.VERSION_UPDATER_FREQ, 'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER"
        )

        # add network timezones updater job
        self.srScheduler.add_job(
            update_network_dict,
            srIntervalTrigger(**{'days': 1}),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add namecache updater job
        self.srScheduler.add_job(
            self.NAMECACHE.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.NAMECACHE_FREQ, 'min': self.srConfig.MIN_NAMECACHE_FREQ}),
            name="NAMECACHE",
            id="NAMECACHE"
        )

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{'hours': 1,
                   'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
            name="SHOWUPDATER",
            id="SHOWUPDATER"
        )

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.DAILY_SEARCHER_FREQ, 'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ}),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER"
        )

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                   'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
            name="BACKLOG",
            id="BACKLOG"
        )

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                                 'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR"
        )

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(**{
                'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                    self.srConfig.PROPER_SEARCHER_INTERVAL]}),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER"
        )

        # add trakt.tv checker job
        self.srScheduler.add_job(
            self.TRAKTSEARCHER.run,
            srIntervalTrigger(**{'hours': 1}),
            name="TRAKTSEARCHER",
            id="TRAKTSEARCHER"
        )

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER"
        )

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
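        # ((pause, resume)[flag]() indexes a tuple of bound job methods with a
        # boolean: a False flag (0) pauses the job, a True flag (1) resumes it)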
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start webserver
        self.srWebServer.start()

        # start ioloop event handler
        IOLoop.instance().start()

    def shutdown(self, status=None, restart=False):
        if self.started:
            self.started = False

            if restart:
                self.srLogger.info('SiCKRAGE IS PERFORMING A RESTART!')
            else:
                self.srLogger.info('SiCKRAGE IS PERFORMING A SHUTDOWN!')

            # shutdown/restart webserver
            self.srWebServer.shutdown()

            # shutdown scheduler
            self.srLogger.info("Shutting down scheduler")
            self.srScheduler.shutdown()

            # shutdown queues
            self.srLogger.info("Shutting down queues")
            if self.SHOWQUEUE:
                self.SHOWQUEUE.shutdown()
            if self.SEARCHQUEUE:
                self.SEARCHQUEUE.shutdown()

            if sickrage.srCore.ADBA_CONNECTION:
                self.srLogger.info("Logging out ANIDB connection")
                sickrage.srCore.ADBA_CONNECTION.logout()

            # save all settings
            self.save_all()

            if restart:
                self.srLogger.info('SiCKRAGE IS RESTARTING!')
            else:
                self.srLogger.info('SiCKRAGE IS SHUTDOWN!')

            # shutdown logging
            self.srLogger.shutdown()

        # delete pid file
        if sickrage.DAEMONIZE:
            sickrage.delpid(sickrage.PID_FILE)

        # system exit with status
        if not restart:
            sys.exit(status)

        # stop ioloop event handler
        IOLoop.current().stop()

    def save_all(self):
        # write all shows
        self.srLogger.info("Saving all shows to the database")
        for SHOW in self.SHOWLIST:
            try:
                SHOW.saveToDB()
            except Exception:
                continue

        # save config
        self.srConfig.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for sqlShow in main_db.MainDB().select("SELECT * FROM tv_shows"):
            try:
                curshow = TVShow(int(sqlShow["indexer"]), int(sqlShow["indexer_id"]))
                self.srLogger.debug("Loading data for show: [{}]".format(curshow.name))
                #self.NAMECACHE.buildNameCache(curshow)
                curshow.nextEpisode()
                self.SHOWLIST += [curshow]
            except Exception as e:
                self.srLogger.error(
                    "There was an error creating the show in {}: {}".format(sqlShow["location"], e.message))
                self.srLogger.debug(traceback.format_exc())
    def __init__(self):
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    (r'/', IndexPageHandler),

                    # --- PROJECT ---  #
                    # List of project summary.
                    (r'/api/v0/projects', apih.ListProjectSummaryHandler),
                    # Details of a project. Args: project id
                    (r'/api/v0/project/([^/]+)/details', apih.ProjectDetailsHandler),
                    # List of details of resources. Args: project id
                    (r'/api/v0/project/([^/]+)/resource/details', apih.ProjectResourceDetailsHandler),
                    # List of language status. Args: project id 
                    (r'/api/v0/project/([^/]+)/translation/status', apih.ProjectTranslationStatusHandler),

                    # --- JOB --- #
                    # List of jobs.
                    (r'/api/v0/jobs', apih.ListJobSummaryHandler),
                    # Summary of a job. Args: job id
                    (r'/api/v0/job/([^/]+)', apih.JobSummaryHandler),
                    # Execute a job. Args: job id
                    (r'/api/v0/job/([^/]+)/exec', apih.JobExecutionHandler),
                    # Details of a job. Args: job id
                    (r'/api/v0/job/([^/]+)/details', apih.JobDetailsHandler),
                    # List of resource file path and slug. Args: job id 
                    (r'/api/v0/job/([^/]+)/resource/slugs', apih.JobResourceSlugsHandler),
                    # List of resource name (in translation platform) and slug. Args: job id
                    (r'/api/v0/job/([^/]+)/translation/slugs', apih.JobTranslationSlugsHandler),
                    # Sync status (only for ResourceUploaderJob or TranslationUploaderJob). Args: job id
                    (r'/api/v0/job/([^/]+)/sync/status', apih.JobSyncStatusHandler),
                    # Job execution status. Args: job id
                    (r'/api/v0/job/([^/]+)/exec/status', apih.JobExecStatusHandler),

                    # maybe /job/(^/]+)/log/context/3  (limit = 3) might be useful

                    # --- CONFIGURATION --- #
                    # not using now but keep for a while   
                    # (r'/api/v0/config/([^/]+)/([^/]+)', apih.ConfigurationHandler), # job id, 'key' in config file
                    # Contents of project configuration file. Args: project id
                    # (r'/api/v0/config/project/([^/]+)', apih.ProjectConfigurationHandler),
                    # Contents of job configuration file. Args: job id
                    # (r'/api/v0/config/job/([^/]+)', apih.JobConfigurationHandler),
                    # Contents of resource configuration file. Args: resource configuration file name
                    (r'/api/v0/config/resource/([^/]+)', apih.ResourceConfigurationHandler),
                    # Contents of translation configuration file. Args: translation configuration file name
                    (r'/api/v0/config/translation/([^/]+)', apih.TranslationConfigurationHandler),

                    #--- LOG (JOB EXECUTION LOG) --- #
                    # Log context. Args: log path
                    (r'/api/v0/log/([^/]+)/context', apih.LogContextHandler),

                    # --- LOCAL FILES --- #
                    # List local repositories (git repository).
                    (r'/api/v0/local/repositories', apih.ListLocalRepositoriesHandler),
                    # List branches of specified local git repository. Args: repository name
                    (r'/api/v0/local/repository/([^/]+)/branches', apih.ListLocalRepositoryBranchesHandler),
                    # List local files under specified directory. Args: repository name, relative path in the repository
                    (r'/api/v0/local/repository/([^/]+)/files/([^/]+)', apih.ListLocalRepositoryFilesHandler),

                    # --- RESOURCE REPOSITORY --- #
                    # List of repositories. Args: platform name
                    #(r'/api/v0/resource/([^/]+)/repositories', apih.ListResourceRepositoriessHandler),

                    #--- TRANSLATION REPOSITORY --- #
                    # List of projects. Args: platform name
                    (r'/api/v0/translation/([^/]+)/projects', apih.ListTranslationProjectsHandler),
                    # Project details. Args: platform name, project id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/details', apih.TranslationProjectDetailsHandler),
                    # Resource details. Args: platform name, project id, resource id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/details', apih.TranslationResourceDetailsHandler),
                    # All strings for a language. Args: platform name, project id, resource id, language code
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/strings', apih.TranslationTranslationStringsHandler),
                    # Details for a translation string. Args: platform name, project id, resource id, source string id
                    # (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/details', apih.TranslationTranslationStringDetailsHandler),
                    # Details for a source string. Args: platform name, project id, resource id, source string id (key)
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/source/([^/]+)/details', apih.TranslationSourceStringDetailsHandler)
                ],
                template_path = os.path.join(os.path.dirname(__file__), 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self._restore_jobs()
        self.scheduler.start()
        self.scheduler.print_jobs()  # print_jobs() writes to stdout and returns None
Exemplo n.º 56
0

class AuthLogoutHandler(BaseHandler):

    def get(self):
        self.clear_cookie("live_digg")
        self.redirect(self.get_argument("next", "/"))


class EntryModule(tornado.web.UIModule):

    def render(self, entry):
        return self.render_string("modules/entry.html", entry=entry)


if __name__ == "__main__":
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    scheduler = TornadoScheduler()
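    # scrape weekdays only, every 15 minutes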
    scheduler.add_job(pushmsgs.scraper, 'cron', day_of_week='mon-fri',
                      hour='*', minute='*/15', id="diggScraper")
    scheduler.start()

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        threading.Thread(target=redis_listener).start()
        tornado.ioloop.IOLoop.current().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemplo n.º 57
0
    def start(self):
        self.started = True

        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.api = API()
        self.main_db = MainDB(self.db_type, self.db_prefix, self.db_host,
                              self.db_port, self.db_username, self.db_password)
        self.cache_db = CacheDB(self.db_type, self.db_prefix, self.db_host,
                                self.db_port, self.db_username,
                                self.db_password)
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
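        # gconfig-style dict: 'apscheduler.'-prefixed options, here pinning the
        # scheduler's default timezone to UTC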
        self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
        self.wserver = WebServer()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()
        self.announcements = Announcements()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id=self.oidc_client_id,
            client_secret=self.oidc_client_secret)

        # check available space
        try:
            self.log.info("Performing disk space checks")
            total_space, available_space = get_free_space(self.data_dir)
            if available_space < 100:
                self.log.warning(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restore_app_data(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: %s!" %
                          ("FAILED", "SUCCESSFUL")[success])
            if success:
                self.main_db = MainDB(self.db_type, self.db_prefix,
                                      self.db_host, self.db_port,
                                      self.db_username, self.db_password)
                self.cache_db = CacheDB(self.db_type, self.db_prefix,
                                        self.db_host, self.db_port,
                                        self.db_username, self.db_password)
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.move_file(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # init encryption public and private keys
        encryption.initialize()

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # perform integrity check
            self.log.info("Performing integrity check on {} database".format(
                db.name))
            db.integrity_check()

            # migrate database
            self.log.info("Performing migrations on {} database".format(
                db.name))
            db.migrate()

            # sync database repo
            self.log.info("Performing sync on {} database".format(db.name))
            db.sync_db_repo()

            # cleanup
            self.log.info("Performing cleanup on {} database".format(db.name))
            db.cleanup()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        uses_netloc.append('scgi')
        FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # load quicksearch cache
        self.quicksearch_cache.load()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(self.version_updater.run,
                               IntervalTrigger(
                                   hours=self.config.version_updater_freq,
                                   timezone='utc'),
                               name=self.version_updater.name,
                               id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(self.tz_updater.run,
                               IntervalTrigger(days=1, timezone='utc'),
                               name=self.tz_updater.name,
                               id=self.tz_updater.name)

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour),
                                   timezone='utc'),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add rss cache updater job
        self.scheduler.add_job(self.rsscache_updater.run,
                               IntervalTrigger(minutes=15, timezone='utc'),
                               name=self.rsscache_updater.name,
                               id=self.rsscache_updater.name)

        # add daily search job
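        # (start_date pushes the first run to ~4 minutes after startup)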
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4),
                            timezone='utc'),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30),
                            timezone='utc'),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(self.auto_postprocessor.run,
                               IntervalTrigger(
                                   minutes=self.config.autopostprocessor_freq,
                                   timezone='utc'),
                               name=self.auto_postprocessor.name,
                               id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(self.proper_searcher.run,
                               IntervalTrigger(minutes={
                                   '15m': 15,
                                   '45m': 45,
                                   '90m': 90,
                                   '4h': 4 * 60,
                                   'daily': 24 * 60
                               }[self.config.proper_searcher_interval],
                                               timezone='utc'),
                               name=self.proper_searcher.name,
                               id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1, timezone='utc'),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(self.subtitle_searcher.run,
                               IntervalTrigger(
                                   hours=self.config.subtitle_searcher_freq,
                                   timezone='utc'),
                               name=self.subtitle_searcher.name,
                               id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime,
                            timezone='utc'),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # add announcements job
        self.scheduler.add_job(self.announcements.run,
                               IntervalTrigger(minutes=15, timezone='utc'),
                               name=self.announcements.name,
                               id=self.announcements.name)

        # start queues
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # fire off startup events
        self.io_loop.add_callback(self.load_shows)
        self.io_loop.add_callback(self.version_updater.run)
        self.io_loop.add_callback(self.tz_updater.run)
        self.io_loop.add_callback(self.announcements.run)

        # start scheduler service
        self.scheduler.start()

        # start web server
        self.wserver.start()

        # launch browser window
        if all(
            [not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.io_loop.add_callback(
                functools.partial(launch_browser,
                                  ('http',
                                   'https')[sickrage.app.config.enable_https],
                                  sickrage.app.config.web_host,
                                  sickrage.app.config.web_port))

        def started():
            threading.currentThread().setName('CORE')
            self.log.info("SiCKRAGE :: STARTED")
            self.log.info("SiCKRAGE :: APP VERSION:[{}]".format(
                sickrage.version()))
            self.log.info("SiCKRAGE :: CONFIG VERSION:[v{}]".format(
                self.config.config_version))
            self.log.info("SiCKRAGE :: DATABASE VERSION:[v{}]".format(
                self.main_db.version))
            self.log.info("SiCKRAGE :: DATABASE TYPE:[{}]".format(
                self.db_type))
            self.log.info("SiCKRAGE :: URL:[{}://{}:{}{}]".format(
                ('http', 'https')[self.config.enable_https],
                self.config.web_host, self.config.web_port,
                self.config.web_root))

        # start io_loop
        self.io_loop.add_callback(started)
        self.io_loop.start()