Example #1
class ScheInterface(object):
    def __init__(self, job_type, store_executor_alias, process_count):
        self.sche = TornadoScheduler()
        self.host = MONGO_CONFIG.get('host')
        self.mongo_client = MongoClient(self.host)
        self.job_type = job_type
        self.mongo_job_store = MongoDBJobStore(collection='job',
                                               database=DBNAME,
                                               client=self.mongo_client)
        self.store_executor_alias = store_executor_alias
        self.process_pool = ProcessPoolExecutor(process_count)

    def add_date_job(self, func, args, run_date, max_instances, listener_fun):
        self.sche.add_jobstore(self.mongo_job_store,
                               alias=self.store_executor_alias)
        self.sche.add_executor(self.process_pool,
                               alias=self.store_executor_alias)
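        # call listener_fun whenever a job raises an error or misses its scheduled run time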
        self.sche.add_listener(listener_fun,
                               EVENT_JOB_ERROR | EVENT_JOB_MISSED)
        try:
            self.sche.add_job(func,
                              self.job_type,
                              args=args,
                              run_date=run_date,
                              max_instances=max_instances,
                              jobstore=self.store_executor_alias)
            return True
        except Exception:
            return False
Example #2
class IpPool(object):
    def __init__(self):
        pass

    def start(self):
        self._scheduler = TornadoScheduler()
        self._scheduler.add_job(
            fetchips,
            'interval',
            seconds=int(
                ApplicationProperties.configure(
                    "application.shared.ippool.fetchInterval")))
        self._scheduler.start()

        print('Application listening...[Port:%s]' %
              (ApplicationProperties.configure(
                  p_key='application.shared.ippool.server.port')))
        application = Application([
            (RequestMapping.dynamic_ip_assign, IPAssign),
        ])
        application.listen(
            ApplicationProperties.configure(
                p_key='application.shared.ippool.server.port'))

        #IOLoop.current().start()
        IOLoop.current().run_sync(fetchips)
        IOLoop.current().start()
Example #3
class GamesService(WebService):
    _name = "Games Service"
    _id = "games"

    def __init__(self, Routes):
        self.scheduler = TornadoScheduler(timezone=utc)
        # self.scheduler.add_job(self.score_games)
        self.scheduler.add_job(self.score_games, 'interval', hours=1)
        self.emitter = ipc.Emitter()
        self.listener = ipc.Listener({
            "games:getScores": self.score_games
        })
        super().__init__(Routes)

    
    def score_games(self, date=None):
        today = datetime_for_game(date)
        print("games for {}".format(str(today)))    
        games = NBAGame.find_sync({"Date": today})
        print("imported {} games today".format(len(games)))    
        box_results = []
        score_results = []
        game_count = 0
        for game in games:
            box_results.append( nba_jobs.box_score.delay(game["fid"]) )
        for result in box_results:
            score_results.append( nba_jobs.score_players.delay(result.get()) )
        for result in score_results:
            playerScoring = result.get()
            self.emitter.publish("games:scores", playerScoring)
            game_count += 1    
        print("scored {} games".format(game_count))
Example #4
def InitConfig():
    client = AliyunClient()
    client.worker.start()
    sched = TornadoScheduler()
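    # re-run InitModel every 60 seconds; the string id uniquely identifies the job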
    sched.add_job(InitModel, 'interval', seconds=60, id="1")
    sched.start()
    ioloop.IOLoop.instance().start()
Example #5
    def __init__(self):
        g.load_global_data()
        if not g.init_success:
            raise Exception('Failed to initialize and load global_data!')

        scheduler = TornadoScheduler()
        scheduler.add_job(scheduler_service.do_every_day, 'cron', day_of_week='0-6', hour=0, minute=30)
        scheduler.start()
Example #6
    def run(self):
        scheduler = TornadoScheduler()
        scheduler.add_job(sync_acba_sentence,
                          "cron",
                          hour='16',
                          minute='43',
                          args=[self.Session])
        scheduler.start()
Example #7
def myjob():
    scheduler = TornadoScheduler()
    scheduler.add_job(
        night, 'cron', hour=20, minute=0, second=0
    )  # sent automatically; day_of_week='mon,tue,wed,thu,fri,sat,sun' selects weekdays (consecutive ranges like mon-wed are allowed); the called function takes no arguments
    scheduler.add_job(moring, 'cron', hour=7, minute=0,
                      second=0)  # e.g. hour=5, minute=30, end_date='2016-12-31' sets the end date
    scheduler.start()
Example #8
def main():
    # Use the above instantiated scheduler
    # Set Tornado Scheduler
    scheduler = TornadoScheduler()
    # Use the imported jobs, every 60 minutes
    scheduler.add_job(log_theta, 'interval', minutes=60)
    scheduler.start()
    application.listen(settings["listen.port"])
    tornado.ioloop.IOLoop.instance().start()
Example #9
def interval_event():
    stsTokenManager = StsTokenManager()
    scheduler = TornadoScheduler()
    scheduler.add_job(stsTokenManager.createStsToken, 'interval', seconds=3000)
    # scheduler.add_job(task2.pushTemplateMessage, 'cron', second=0, minute=30, hour=9)
    # scheduler.add_job(task2.pushTemplateMessage, 'cron', second=0, minute=30, hour=11)
    # scheduler.add_job(task2.pushTemplateMessage, 'cron', second=0, minute=30, hour=13)
    # scheduler.add_job(task2.pushTemplateMessage, 'cron', second=0, minute=30, hour=17)
    scheduler.start()
Example #10
class SnapshotUtil(object):
    instance = None

    def __init__(self):
        self.scheduler = TornadoScheduler()
    @staticmethod
    def get_instance():
        if not SnapshotUtil.instance:
            SnapshotUtil.instance = SnapshotUtil()
        return SnapshotUtil.instance


    def set_snapshot_interval(self, pos, hours):
        self.scheduler.reschedule_job('snapshot_%d' % pos, trigger='interval', hours=hours)


    def snapshot_start(self):
        self.scheduler.start()


    def snapshot_stop(self):
        self.scheduler.shutdown()

    def snapshot(self, pos, url):
        print "snapshot",pos,url
        path = os.path.join(os.getcwd(),CAPTURED_DIR)
        path = os.path.join(path, str(pos)+"_"+time.strftime("%Y_%m_%d_%H_%M_%S",time.localtime(time.time()))+".jpeg")
        command = "sudo ffmpeg -ss 30 -i \'%s\' -y -t 30 -r 1 -f image2 %s" % (url, path)
        print command
        os.system(command)
        time.sleep(5)
        add_caputured_image(os.path.basename(path), pos, 1920, 1080)
        self.update_image(path, pos)


    def update_image(self, path, pos):
        datagen, headers = multipart_encode({'file': open(path, "rb"), 'pos': pos, "mac": get_mac_address(),'created_at': time.time()})
        url = 'http://%s:%d%s'%(SERVER_WEBSITE, API_PORT, UPLOAD_IMG_URL)
        print url
        request = urllib2.Request(url, datagen, headers)
        res = urllib2.urlopen(request, timeout=30).read()
        print "ok"
        if res == "ok":
            return
        else:
            return

    def remove_snapshot(self,pos):
        self.scheduler.remove_job("snapshot_%d"%pos)

    def add_snapshot(self, pos, url, hours):
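        # NOTE: the hours argument is not used here; the snapshot job runs at a fixed 100-second interval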
        self.scheduler.add_job(self.snapshot, 'interval', args=[ pos, url], seconds=100, id="snapshot_%d"%(pos))

    def init_snapshot(self):
        positions = get_positions()
        for pos in positions:
            self.add_snapshot(int(pos['position']), pos['ip_address'], pos['duration'])
Example #11
def main():
    # Use the above instantiated scheduler
    # Set Tornado Scheduler
    scheduler = TornadoScheduler()
    # Use the imported jobs, every 60 minutes
    scheduler.add_job(log_theta, 'interval', minutes=60)
    scheduler.start()
    application.listen(settings["listen.port"])
    tornado.ioloop.IOLoop.instance().start()
Example #12
class TimeTask(object):
    def __init__(self, sqlalchemy_engine):
        self.scheduler = TornadoScheduler()
        self.scheduler.add_jobstore("sqlalchemy", engine=sqlalchemy_engine)

    def add_cache_flush_task(self, func, *args, **kwargs):
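        # flush the cache daily at midnight; replace_existing refreshes any copy already persisted in the SQLAlchemy job store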
        self.scheduler.add_job(func, 'cron', args=args, kwargs=kwargs,
                               id="cache_flush", replace_existing=True, hour=0, day='*')
        return self

    def start_tasks(self):
        self.scheduler.start()
Example #13
def TornadoScheduler_test():
    sched = TornadoScheduler()
    sched.add_job(tick, 'interval', seconds=3)
    sched.add_job(tick1, 'interval', id='1', seconds=1)
    sched.start()

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #14
def main():
    verifyDatabase()
    trigger = CronTrigger(second='*/59')
    scheduler = TornadoScheduler()
    scheduler.add_job(runTests, trigger)
    scheduler.start()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(RFBGATEWAYPORT)
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        tornado.ioloop.IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #15
def main():
    # Running APScheduler
    aps = TornadoScheduler()
    aps.add_jobstore('mongodb', collection='example_jobs')
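    # clear any jobs persisted in the MongoDB job store from a previous run before re-adding them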
    aps.remove_all_jobs()
    aps.add_job(tick, 'interval', seconds=3)
    aps.add_job(tick, 'interval', seconds=3)
    aps.add_job(tick, 'interval', seconds=3)
    aps.start()
    # Running server
    app = TornadoApplication()
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
Example #16
    def excute_job(self):
        sched = TornadoScheduler()
        for arg in self.args:
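            # schedule self.job once per day for each argument tuple, starting at self.timer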
            sched.add_job(func=self.job,
                          args=arg,
                          trigger=IntervalTrigger(start_date=self.timer,
                                                  days=1))
        sched.start()

        try:
            tornado.ioloop.IOLoop.current().start()
        except (KeyboardInterrupt, SystemExit):
            sched.shutdown()
Example #17
def main(port, host):
    app = Application()
    app.listen(port, "0.0.0.0")
    if platform.system() == 'Linux':
        get_server_info()
    scheduler = TornadoScheduler()
    scheduler.add_job(modify_ip_protectgroup.modify_ip_protectgroup,
                      'interval',
                      seconds=10)
    scheduler.add_job(modify_ip_protectgroup.modify_package,
                      'interval',
                      seconds=10)
    scheduler.add_job(CheckProtectPackageDueTime.CheckProtectPackageDueTime,
                      'cron',
                      hour=0,
                      minute=0)
    scheduler.add_job(delete_ip_white_list.delete_ip_white_list,
                      'interval',
                      seconds=20)
    # if CONFIG.RecordLog.record_way == 'new':
    #     scheduler.add_job(getblocklist.block_cmcc_log, 'interval', seconds=1200)
    #     scheduler.add_job(getblocklist.block_cnc_log, 'interval', seconds=1200)
    #     scheduler.add_job(getblocklist.block_ctc_log, 'interval', seconds=1200)
    # else:
    #     scheduler.add_job(getblocklist.block_cmcc_log, 'interval', seconds=60)
    #     scheduler.add_job(getblocklist.block_cnc_log, 'interval', seconds=60)
    #     scheduler.add_job(getblocklist.block_ctc_log, 'interval', seconds=60)
    scheduler.start()
    tornado.ioloop.IOLoop.instance().start()
Example #18
def start_schedule():
    global schedule
    if schedule is None:
        logger.info("Launching scheduler")
        schedule = TornadoScheduler()
        schedule.start()
        logger.info("Hydrating schedule with surveys")
        load_persisted_tasks()
        logger.info(
            "Preparing maintenance jobs for updating schedule (adding and removing)"
        )
        schedule.add_job(check_for_new_tasks, 'interval', minutes=5)
        schedule.add_job(check_for_removed_tasks, 'interval', minutes=30)
    else:
        logger.info("Schedule was already running")
Example #19
def main():

    define('port', default=3333, help='bind to this port', type=int)
    define('listen', default='127.0.0.1', help='listen address', type=str)
    define('debug', default=False, help='debug', type=bool)
    define("no_ts", default=False, help="timestamp when logging", type=bool)
    define("address", default='', help="Divoom max address", type=str)
    # define('log_file_prefix', default='/var/log/tb-evo-rest.log', help='log file prefix')

    tornado.options.parse_command_line()

    # Create an instance of tornado formatter, just overriding the 'fmt' 'datefmt' args

    if options.no_ts:
        my_log_format = '%(color)s%(levelname)1.1s [%(module)s:%(lineno)d]%(end_color)s %(message)s'
    else:
        my_log_format = '%(color)s%(asctime)s %(levelname)1.1s [%(module)s:%(lineno)d]%(end_color)s %(message)s'

    my_log_formatter = LogFormatter(fmt=my_log_format, datefmt='%Y-%m-%d %H:%M:%S', color=True)

    for handler in logging.getLogger().handlers:
        handler.setFormatter(my_log_formatter)

    application = Application()
    http_server = tornado.httpserver.HTTPServer(application, xheaders=True)
    http_server.listen(options.port, address=options.listen)

    # Schedule job for forecast
    scheduler = TornadoScheduler()
    # logging.getLogger('apscheduler.base').setLevel(logging.WARNING)
    # logging.getLogger('apscheduler').setLevel(logging.WARNING)

    scheduler.start()
    scheduler.add_job(lambda: five_min_ticker(application.divoom()), trigger='interval', start_date="2018-01-01", seconds=5 * 60)
    scheduler.add_job(fifteen_min_ticker, trigger='interval', start_date="2018-01-01", seconds=15 * 60)
    logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)

    # Setup signal handlers
    signal.signal(signal.SIGINT, lambda sig, frame: ioloop.add_callback_from_signal(exit))
    signal.signal(signal.SIGTERM, lambda sig, frame: ioloop.add_callback_from_signal(exit))
    atexit.register(lambda: shutdownHandler(application, scheduler))

    # Fire up our server

    logging.info('Server started on %s:%d', options.listen, options.port)

    ioloop.start()
Example #20
def main():
    tornado.options.parse_config_file(os.path.join(os.path.dirname(__file__), "config.py"))
    tornado.options.parse_command_line()
    global app
    app = Application()
    app.iphone_data = {}

    scraper_all()
    scheduler = TornadoScheduler()
    time_interval = 60
    scheduler.add_job(scraper_all, 'interval', seconds=time_interval)
    scheduler.add_job(update_data, 'interval', seconds=time_interval)
    scheduler.start()

    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #21
def tornado_schedule():
    from tornado.ioloop import IOLoop
    from apscheduler.schedulers.tornado import TornadoScheduler

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #22
def tornado_schedule():
    from tornado.ioloop import IOLoop
    from apscheduler.schedulers.tornado import TornadoScheduler

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #23
def main():
    tornado.options.parse_config_file(
        os.path.join(os.path.dirname(__file__), "config.py"))
    tornado.options.parse_command_line()
    global app
    app = Application()
    app.iphone_data = {}

    scraper_all()
    scheduler = TornadoScheduler()
    time_interval = 60
    scheduler.add_job(scraper_all, 'interval', seconds=time_interval)
    scheduler.add_job(update_data, 'interval', seconds=time_interval)
    scheduler.start()

    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #24
    def _scheduler_task(self):    
        scheduler = TornadoScheduler(timezone="UTC") 
        # scheduler.add_job(self._scrapy_job,'interval', minutes=5)
        
        d1 = datetime.datetime.now()
        d2 = d1+datetime.timedelta(seconds=10)
        dates = d2.strftime("%Y-%m-%d %H:%M:%S")

        # scheduler.add_job(self._scrapy_job,'date', run_date=dates, args=[])

        scheduler.add_job(self._scrapy_job,'interval', minutes=50)

        # scheduler.add_job(self._scan_database_address,'date', run_date=dates, args=[])

        scheduler.add_job(self._scan_database_address,'interval', minutes=20)

        logg.info("Scheduler Task Start")
        scheduler.start()
Example #25
class TimeTask:

    def __init__(self):
        self.scheduler = TornadoScheduler()
        # Job store: uses the SQL database by default
        mysql_store = SQLAlchemyJobStore(url=engine_url, tablename='mex_apscheduler_jobs')
        self.scheduler.add_jobstore(mysql_store)
        ### Can be changed to use Redis instead
        ### redis_store = RedisJobStore(host=redis_config['host'], port=redis_config['port'])
        ### self.scheduler.add_jobstore(redis_store, alias='redis')

    # Add a scheduled task
    def add_task(self, func, mode='cron', *args, **kwargs):
        self.scheduler.add_job(func, trigger=mode, *args, **kwargs, replace_existing=True, max_instances=1)
        return self

    # Start the scheduled tasks
    def start_tasks(self):
        self.scheduler.start()
Example #26
    async def session_start(self, session):
        sc_tables = Scheduler_Table.select().where(
            (Scheduler_Table.date == self.date)
            & (Scheduler_Table.session == session)).execute()
        # Things to do when the class session starts
        for sc_table in sc_tables:
            room = ClassRoom.get(id=sc_table.room_id)
            airs = WifiDevice.select().where(
                (WifiDevice.device_name % 'air%')
                & (WifiDevice.class_number == room.name)).execute()
            devices = WifiDevice.select().where(
                (WifiDevice.device_name % 'fan%')
                & (WifiDevice.class_number == room.name)).execute()
            for air in airs:
                # Create a controller to send the command; the air conditioners are on a separate board and must be set up individually
                air_controller = DeviceController(air.device_name,
                                                  air.class_number)
                air_controller.turn_on_air()
                await air_controller.send()

            # Control the fans, lights and curtains
            controller = DeviceController(devices[0].device_name,
                                          devices[0].class_number)
            for device in devices:
                controller.turn_on(device.device_number)
            await controller.send()

            # Schedule the end-of-class task
            end_session_schedule = TornadoScheduler()
            end_session_schedule.add_job(
                self.session_end,
                'cron', (
                    end_session_schedule,
                    airs,
                    devices,
                    session,
                ),
                hour=session_time["end"][session][0],
                minute=session_time["end"][session][1])
            end_session_schedule.start()
            logging.info("room_id: " + str(sc_table.room_id) + " open date: " +
                         self.date + " session: " + str(session))
Example #27
def main():
    trigger = CronTrigger(second='*/59')
    trigger2 = CronTrigger(second='*/5')
    scheduler = TornadoScheduler()
    scheduler.add_job(register_ip, trigger)
    global job
    job = scheduler.add_job(start_browser, trigger2)
    scheduler.start()
    #print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(HOST_PORT)
    #tornado.ioloop.IOLoop.instance().start()
    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        tornado.ioloop.IOLoop.instance().start()
        #IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #28
def main():
    tornado.options.define("port",
                           default=8000,
                           help="run on the given port",
                           type=int)
    tornado.options.log_file_prefix = os.path.join(os.path.dirname(__file__),
                                                   'logs/tornado_main.log')
    # tornado.options.log_rotate_mode = 'time'  # rotation mode: time or size
    # tornado.options.log_rotate_when = 'D'  # units: S / M / H / D / W0 - W6
    # tornado.options.log_rotate_interval = 7  # interval: 7 days
    tornado.options.parse_command_line()
    [i.setFormatter(LogFormatter()) for i in logging.getLogger().handlers]
    http_server = HTTPServer(Application(handlers, **SETTING['setting']))
    http_server.listen(tornado.options.options.port)
    logging.info("server :{} start...".format(tornado.options.options.port))
    sched = TornadoScheduler()
    # one process
    sched.add_job(load_page, 'interval', hours=2)
    sched.start()
    http_server.start(1)
    tornado.ioloop.IOLoop.current().start()
Example #29
class Application(tornado.web.Application):
    def __init__(self, routed_handlers, **app_settings):
        super(Application, self).__init__(routed_handlers,
                                          ui_modules=uimodules,
                                          **app_settings)
        self.config = load_config()
        log.debug(self.config)
        self.db_pool = None
        self.scheduler = None

    def run(self, port=8888):
        self.listen(port)
        tornado.ioloop.IOLoop.current().start()

    async def _init(self):
        self.db_pool = await db.create_pool(**self.config.get("database", {}))
        await self.__setup_db()
        await self.__setup_scheduler()

    async def __setup_db(self):
        async with self.db_pool.acquire() as conn:
            # Setup DB
            await db.setup_tables(conn)
            # Add configurated servers
            for server in self.config.get("servers", []):
                if not await db.server_exists(conn, server.get("name")):
                    await db.add_server(conn, server.get("name"),
                                        server.get("url"), server.get("port"))

    async def __setup_scheduler(self):
        self.scheduler = TornadoScheduler()
        for server in self.config.get("servers", []):
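            # one daily run_checks job per configured server; first run roughly 30 seconds after startup, with up to 300 seconds of random jitter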
            self.scheduler.add_job(run_checks,
                                   'interval', [self.db_pool, server],
                                   name=f"check_{server.get('name')}",
                                   next_run_time=datetime_in_n_seconds(30),
                                   hours=24,
                                   jitter=300)
        self.scheduler.start()
Example #30
class TimerManager:
    def __init__(self):  ## card type, card face
        self.m_scheduler = TornadoScheduler()
        self.start()

    def start(self):
        return self.m_scheduler.start()

    def stop(self):
        return self.m_scheduler.shutdown()

    def getTimer(self, id):
        return self.m_scheduler.get_job(str(id))

    def addLoopTimer(self, tid, tick, time, arg):
        try:
            job = self.getTimer(tid)
            if job is None:
                self.m_scheduler.add_job(tick, 'interval', seconds=int(time), args=arg, id=str(tid), replace_existing=True)  # run once every int(time) seconds
        except Exception, e:
            print str(e)
Example #31
class TimeTask(object):
    def __init__(self, sqlalchemy_engine):
        # Scheduler: schedules the jobs
        self.scheduler = TornadoScheduler()
        # Job store: Redis, MongoDB, a relational database, or in-memory
        self.scheduler.add_jobstore("sqlalchemy", engine=sqlalchemy_engine)

    def add_cache_flush_task(self, func, *args, **kwargs):
        self.scheduler.add_job(func,
                               'cron',
                               args=args,
                               kwargs=kwargs,
                               id="cache_flush",
                               replace_existing=True,
                               hour=0,
                               day='*')
        # (func, 'cron', day_of_week='mon-fri', hour='0-9', minute='30-59', second='*/3')
        # Cron-style: runs Monday through Friday, between 00:00 and 09:59, during minutes 30-59, once every 3 seconds
        return self

    def start_tasks(self):
        self.scheduler.start()
Example #32
    def start(cls):
        # Read command line arguments
        context: ApplicationContext = ArgumentParsingUtils.parse_arguments()

        #initiate application logging
        Logger.initiate_logger(context.logger_configs)
        
        #getting logger
        logger = Logger(cls.__name__)
        logger.info('Initializing the application...')
        logger.debug(f'Configurations: {context}')

        settings = {
            'template_path': abspath(join(dirname(__file__), '../../views/templates')),
            'static_path': abspath(join(dirname(__file__), '../../views/static'))
        }
        # Create application by assigning routes and the location of view files
        app = Application(Router.routes(), **settings)
        
        # Server creation
        server = HTTPServer(app)
        server.bind(context.port)
        server.start(context.process)
        
        # Connect to MongoDB server
        Mongo.init(context.db_configs)
        # This is done so that every incoming request has a pointer to the database connection
        app.settings['db'] = Mongo.get()

        # initiating minio client
        MinioClient.init(context.minio_configs)
        # schedule log file uploader in an interval of hours
        scheduler = TornadoScheduler()
        scheduler.add_job(UploadLogsToMinio.upload_files_to_minio, 'interval', hours=context.logger_configs.log_upload_interval)
        scheduler.start()

        logger.info(f'Listening on port {context.port}')
        IOLoop.current().start()
Example #33
def start_app():
    try:
        tornado.options.parse_command_line()
        app = Application()
        http_server = tornado.httpserver.HTTPServer(app,
                                                    xheaders=True,
                                                    max_buffer_size=1000 *
                                                    1024 * 1024)
        # http_server.listen(int(settings.get("PORT", "8085")))
        logging.info(u'starting service... ')

        scheduler = TornadoScheduler()
        # scheduler.add_job(ai_server, 'interval', seconds=3)
        if settings.IF_INCREASE_REPORT:
            scheduler.add_job(report_upload, 'cron', hour=4)
        if settings.IF_INCREASE_STUDY:
            scheduler.add_job(study_upload, 'cron', hour=4)
        scheduler.start()

        if settings.IF_AISERVER:
            tornado.ioloop.PeriodicCallback(query_uploadinfo,
                                            2 * 60 * 1000).start()
            tornado.ioloop.PeriodicCallback(outpatient_service,
                                            1 * 60 * 1000).start()
            tornado.ioloop.PeriodicCallback(uploadimage_service,
                                            3 * 60 * 1000).start()
            tornado.ioloop.PeriodicCallback(airesult_service,
                                            3 * 6 * 1000).start()
        # tornado.ioloop.IOLoop.current().spawn_callback(uploadimage_service)
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt as e:
        logging.info(u'stopping service...')
        tornado.ioloop.IOLoop.instance().stop()
    except Exception as e:
        logging.exception(e)
        raise e
Example #34
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop()
        self.pid = os.getpid()
        self.showlist = []

        self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quite = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = (
            "1x02",
            "s01e02",
            "S01E02",
            "01x02",
            "S01 E02",
        )
        self.naming_multi_ep_type = {
            0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
            1: [" - " + x for x in self.naming_ep_type],
            2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]
        }
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [
            language for language in os.listdir(sickrage.LOCALE_DIR)
            if '_' in language
        ]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca',
                              realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(
            client_id='sickrage-app',
            client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(self.data_dir, 'restore')),
                self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(self.data_dir, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(self.data_dir, 'sickrage.db'),
                    os.path.join(
                        self.data_dir, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                    'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s',
                           traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:

            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username,
                                          self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent',
                                              'transmission', 'deluge',
                                              'deluged', 'download_station',
                                              'rtorrent', 'qbittorrent',
                                              'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m',
                                                        '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(hours=self.config.version_updater_freq),
            name=self.version_updater.name,
            id=self.version_updater.name)

        # add network timezones updater job
        self.scheduler.add_job(update_network_dict,
                               IntervalTrigger(days=1),
                               name="TZUPDATER",
                               id="TZUPDATER")

        # add show updater job
        self.scheduler.add_job(self.show_updater.run,
                               IntervalTrigger(
                                   days=1,
                                   start_date=datetime.datetime.now().replace(
                                       hour=self.config.showupdate_hour)),
                               name=self.show_updater.name,
                               id=self.show_updater.name)

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(minutes=self.config.daily_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name)

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(hours=1,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=4)),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name)

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(minutes=self.config.backlog_searcher_freq,
                            start_date=datetime.datetime.now() +
                            datetime.timedelta(minutes=30)),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name)

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(minutes=self.config.autopostprocessor_freq),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name)

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(minutes={
                '15m': 15,
                '45m': 45,
                '90m': 90,
                '4h': 4 * 60,
                'daily': 24 * 60
            }[self.config.proper_searcher_interval]),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name)

        # add trakt.tv checker job
        self.scheduler.add_job(self.trakt_searcher.run,
                               IntervalTrigger(hours=1),
                               name=self.trakt_searcher.name,
                               id=self.trakt_searcher.name)

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(hours=self.config.subtitle_searcher_freq),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name)

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(seconds=self.upnp_client._nat_portmap_lifetime),
            name=self.upnp_client.name,
            id=self.upnp_client.name)

        # start scheduler service
        self.scheduler.start()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug(
                        "Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist and quicksearch cache with shows and episodes from the database
        """

        self.quicksearch_cache.load()

        for dbData in self.main_db.all('tv_shows'):
            try:
                self.log.debug("Loading data for show: [{}]".format(
                    dbData['show_name']))
                self.showlist.append(
                    TVShow(int(dbData['indexer']), int(dbData['indexer_id'])))
                self.quicksearch_cache.add_show(dbData['indexer_id'])
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" %
                               (dbData['location'], str(e)))
Example #35
class Core(object):
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = AttrDict(
            libnotify=LibnotifyNotifier(),
            kodi_notifier=KODINotifier(),
            plex_notifier=PLEXNotifier(),
            emby_notifier=EMBYNotifier(),
            nmj_notifier=NMJNotifier(),
            nmjv2_notifier=NMJv2Notifier(),
            synoindex_notifier=synoIndexNotifier(),
            synology_notifier=synologyNotifier(),
            pytivo_notifier=pyTivoNotifier(),
            growl_notifier=GrowlNotifier(),
            prowl_notifier=ProwlNotifier(),
            libnotify_notifier=LibnotifyNotifier(),
            pushover_notifier=PushoverNotifier(),
            boxcar_notifier=BoxcarNotifier(),
            boxcar2_notifier=Boxcar2Notifier(),
            nma_notifier=NMA_Notifier(),
            pushalot_notifier=PushalotNotifier(),
            pushbullet_notifier=PushbulletNotifier(),
            freemobile_notifier=FreeMobileNotifier(),
            twitter_notifier=TwitterNotifier(),
            trakt_notifier=TraktNotifier(),
            email_notifier=EmailNotifier()
        )

        # generate metadata providers dict
        self.metadataProviderDict = get_metadata_generator_dict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init scheduler service
        self.srScheduler = TornadoScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                                 os.path.join(sickrage.DATA_DIR, '{}.bak-{}'
                                              .format('sickrage.db',
                                                      datetime.datetime.now().strftime(
                                                          '%Y%m%d_%H%M%S'))))

            helpers.moveFile(os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickbeard.db')), os.path.abspath(os.path.join(sickrage.DATA_DIR, 'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.debugLogging = sickrage.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE
        self.srLogger.logFile = self.srConfig.LOG_FILE

        # start logger
        self.srLogger.start()

        # initialize the main SB database
        main_db.MainDB().InitialSchema().upgrade()

        # initialize the cache database
        cache_db.CacheDB().InitialSchema().upgrade()

        # initialize the failed downloads database
        failed_db.FailedDB().InitialSchema().upgrade()

        # fix up any db problems
        main_db.MainDB().SanityCheck()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for dir in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(self.srConfig.CACHE_DIR, dir), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:
            try:
                self.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=lambda msg: self.srLogger.debug(
                    "AniDB: %s " % msg)).auth(self.srConfig.ANIDB_USERNAME, self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole',
                                                'utorrent',
                                                'transmission',
                                                'deluge',
                                                'deluged',
                                                'download_station',
                                                'rtorrent',
                                                'qbittorrent',
                                                'mlnet',
                                                'putio'): self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m', '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = get_backlog_cycle_time()
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        self.srConfig.NEWS_LATEST = self.srConfig.NEWS_LAST_READ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # initialize metadata_providers
        for cur_metadata_tuple in [(self.srConfig.METADATA_KODI, kodi),
                                   (self.srConfig.METADATA_KODI_12PLUS, kodi_12plus),
                                   (self.srConfig.METADATA_MEDIABROWSER, mediabrowser),
                                   (self.srConfig.METADATA_PS3, ps3),
                                   (self.srConfig.METADATA_WDTV, wdtv),
                                   (self.srConfig.METADATA_TIVO, tivo),
                                   (self.srConfig.METADATA_MEDE8ER, mede8er)]:
            (cur_metadata_config, cur_metadata_class) = cur_metadata_tuple
            tmp_provider = cur_metadata_class.metadata_class()
            tmp_provider.set_config(cur_metadata_config)

            self.metadataProviderDict[tmp_provider.name] = tmp_provider

        # add show queue job
        self.srScheduler.add_job(
            self.SHOWQUEUE.run,
            srIntervalTrigger(**{'seconds': 5}),
            name="SHOWQUEUE",
            id="SHOWQUEUE"
        )

        # add search queue job
        self.srScheduler.add_job(
            self.SEARCHQUEUE.run,
            srIntervalTrigger(**{'seconds': 5}),
            name="SEARCHQUEUE",
            id="SEARCHQUEUE"
        )

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.VERSION_UPDATER_FREQ, 'min': self.srConfig.MIN_VERSION_UPDATER_FREQ}),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER"
        )
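        # NOTE: 'min' is not a standard APScheduler IntervalTrigger argument; it is
        # presumably consumed by the srIntervalTrigger wrapper to enforce a minimum
        # interval (an assumption based on the MIN_* config values passed alongside
        # the frequencies here and below).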

        # add network timezones updater job
        self.srScheduler.add_job(
            update_network_dict,
            srIntervalTrigger(**{'days': 1}),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add namecache updater job
        self.srScheduler.add_job(
            self.NAMECACHE.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.NAMECACHE_FREQ, 'min': self.srConfig.MIN_NAMECACHE_FREQ}),
            name="NAMECACHE",
            id="NAMECACHE"
        )

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{'hours': 1,
                   'start_date': datetime.datetime.now().replace(hour=self.srConfig.SHOWUPDATE_HOUR)}),
            name="SHOWUPDATER",
            id="SHOWUPDATER"
        )

        # add daily search job
        self.srScheduler.add_job(
            self.DAILYSEARCHER.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.DAILY_SEARCHER_FREQ, 'min': self.srConfig.MIN_DAILY_SEARCHER_FREQ}),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER"
        )

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{'minutes': self.srConfig.BACKLOG_SEARCHER_FREQ,
                   'min': self.srConfig.MIN_BACKLOG_SEARCHER_FREQ}),
            name="BACKLOG",
            id="BACKLOG"
        )

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(**{'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                                 'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ}),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR"
        )

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(**{
                'minutes': {'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                    self.srConfig.PROPER_SEARCHER_INTERVAL]}),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER"
        )

        # add trakt.tv checker job
        self.srScheduler.add_job(
            self.TRAKTSEARCHER.run,
            srIntervalTrigger(**{'hours': 1}),
            name="TRAKTSEARCHER",
            id="TRAKTSEARCHER"
        )

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(**{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER"
        )

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()
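        # NOTE: the (pause, resume)[flag]() idiom indexes a tuple of bound methods
        # with a boolean and calls the selected one; it is equivalent to:
        #     if self.srConfig.DOWNLOAD_PROPERS:
        #         self.srScheduler.get_job('PROPERSEARCHER').resume()
        #     else:
        #         self.srScheduler.get_job('PROPERSEARCHER').pause()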

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start webserver
        self.srWebServer.start()

        # start ioloop event handler
        IOLoop.instance().start()

    def shutdown(self, status=None, restart=False):
        if self.started:
            self.started = False

            if restart:
                self.srLogger.info('SiCKRAGE IS PERFORMING A RESTART!')
            else:
                self.srLogger.info('SiCKRAGE IS PERFORMING A SHUTDOWN!')

            # shutdown/restart webserver
            self.srWebServer.shutdown()

            # shutdown scheduler
            self.srLogger.info("Shutting down scheduler")
            self.srScheduler.shutdown()

            # shutdown queues
            self.srLogger.info("Shutting down queues")
            if self.SHOWQUEUE:
                self.SHOWQUEUE.shutdown()
            if self.SEARCHQUEUE:
                self.SEARCHQUEUE.shutdown()

            if sickrage.srCore.ADBA_CONNECTION:
                self.srLogger.info("Logging out ANIDB connection")
                sickrage.srCore.ADBA_CONNECTION.logout()

            # save all settings
            self.save_all()

            if restart:
                self.srLogger.info('SiCKRAGE IS RESTARTING!')
            else:
                self.srLogger.info('SiCKRAGE IS SHUTDOWN!')

            # shutdown logging
            self.srLogger.shutdown()

        # delete pid file
        if sickrage.DAEMONIZE:
            sickrage.delpid(sickrage.PID_FILE)

        # system exit with status
        if not restart:
            sys.exit(status)

        # stop ioloop event handler
        IOLoop.current().stop()

    def save_all(self):
        # write all shows
        self.srLogger.info("Saving all shows to the database")
        for SHOW in self.SHOWLIST:
            try:
                SHOW.saveToDB()
            except Exception:
                continue

        # save config
        self.srConfig.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for sqlShow in main_db.MainDB().select("SELECT * FROM tv_shows"):
            try:
                curshow = TVShow(int(sqlShow["indexer"]), int(sqlShow["indexer_id"]))
                self.srLogger.debug("Loading data for show: [{}]".format(curshow.name))
                #self.NAMECACHE.buildNameCache(curshow)
                curshow.nextEpisode()
                self.SHOWLIST += [curshow]
            except Exception as e:
                self.srLogger.error(
                    "There was an error creating the show in {}: {}".format(sqlShow["location"], e))
                self.srLogger.debug(traceback.format_exc())
Exemple #36
0

class AuthLogoutHandler(BaseHandler):

    def get(self):
        self.clear_cookie("live_digg")
        self.redirect(self.get_argument("next", "/"))


class EntryModule(tornado.web.UIModule):

    def render(self, entry):
        return self.render_string("modules/entry.html", entry=entry)


if __name__ == "__main__":
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    scheduler = TornadoScheduler()
    scheduler.add_job(pushmsgs.scraper, 'cron', day_of_week='mon-fri',
                      hour='*', minute='*/15', id="diggScraper")
    scheduler.start()

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        threading.Thread(target=redis_listener).start()
        tornado.ioloop.IOLoop.current().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemple #37
0
def main():

    application = tornado.web.Application([
        (r'/', IndexHandler),
        (r'/dash', DashHandler),
        (r'/test', TestHandler),
        (r'/google', AnalyticsHandler),
        (r'/assets/(.*)', tornado.web.StaticFileHandler, {'path': './assets'},),
        (r'/ws/', WebSocketHandler)
    ])

    parse_command_line()
    application.listen(options.port)

    sched = TornadoScheduler(daemon=True)
    atexit.register(lambda: sched.shutdown())
    sched.add_job(social_media_fetcher.instagram_counts, 'cron', minute="*/1")
    sched.add_job(social_media_fetcher.twitter_counts, 'cron', minute="*/1")
    sched.add_job(social_media_fetcher.pinterest_counts, 'cron', minute="*/5")
    sched.add_job(social_media_fetcher.youtube_counts, 'cron', minute="*/5")
    sched.add_job(social_media_fetcher.facebook_counts, 'cron', minute="*/1")
    # TODO: reinstate when keys are inserted
    # sched.add_job(social_media_fetcher.linkedin_count, 'cron', minute="*/5")
    # Google Analytics importer
    sched.add_job(analytic_fetcher.get_results, 'cron', hour="1", minute="1")
    sched.start()

    tornado.ioloop.IOLoop.instance().start()
Exemple #38
0
from apscheduler.schedulers.tornado import TornadoScheduler
from .test_job import test_job


scheduler = TornadoScheduler()
scheduler.add_job(test_job, 'interval', seconds=3)
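
# Note: this module only builds the scheduler and registers test_job; it never
# calls scheduler.start(). A minimal sketch of how the owning application might
# start it alongside Tornado's IOLoop (assuming this module is importable as
# `jobs.scheduler`, a hypothetical path):
#
#     from tornado.ioloop import IOLoop
#     from jobs.scheduler import scheduler
#
#     scheduler.start()
#     IOLoop.current().start()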
Exemple #39
0
"""
Demonstrates how to use the Tornado compatible scheduler to schedule a job that executes on 3 second intervals.
"""

from datetime import datetime
import os

from tornado.ioloop import IOLoop
from apscheduler.schedulers.tornado import TornadoScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemple #40
0
        # get all routes which belong to user or profile
        # TODO - include employees
        self.write( {"lol":"lmao"} )

@Route(r"/signals")
class SimpleHandler6(tornado.web.RequestHandler):
    def get(self):
        # TODO - get all routes which belong to
        self.write( {"lol":"lmao"} )

app = tornado.web.Application(Route.routes() + [
 (r'/send_message', SendMessageHandler)
] + sockjs.tornado.SockJSRouter(MessageHandler, '/sockjs').urls)

if __name__ == "__main__":
    app.listen(8988)
    #app.listen(8000)
    #app.listen(5000)
    #tornado.ioloop.IOLoop.current().add_callback(print_changes)
    tornado.ioloop.IOLoop.current().add_callback(company_name_to_domain_changes)
    tornado.ioloop.IOLoop.current().add_callback(trigger_changes)

    scheduler = TornadoScheduler()
    scheduler.add_job(AsyncCompanyNameResearch().start, 'interval', seconds=1)
    scheduler.add_job(AsyncCompanyResearch().start_company_info_research, 'interval', seconds=1)
    scheduler.add_job(AsyncCompanyResearch().start_employee_research, 'interval', seconds=1)
    scheduler.add_job(AsyncCompanyResearch().start_email_pattern_research, 'interval', seconds=1)
    scheduler.start()

    tornado.ioloop.IOLoop.current().start()
Exemple #41
0
def index_container_packages(docker_client):
    for container in docker_client.containers():
        indexer = DockerPackageIndexer(container, docker_client)
        indexer.index()

if __name__ == '__main__':
    tornado.log.enable_pretty_logging()
    tornado.options.parse_command_line()

    try:
        docker_client = Client(**kwargs_from_env(assert_hostname=False))
        docker_client.ping()
    except ConnectionError:
        logging.error("Unable to connect to Docker. Ensure Docker is running and environment variables are set.")
        exit(1)

    scheduler = TornadoScheduler()
    scheduler.add_job(lambda: index_container_packages(docker_client), 'interval', seconds=tornado.options.options.interval)
    scheduler.start()

    app = tornado.web.Application([
        (r"/", SearchHandler),
    ], debug=tornado.options.options.reload)

    http_server = tornado.httpserver.HTTPServer(app)
    http_server.listen(tornado.options.options.port)

    try:
        tornado.ioloop.IOLoop.current().start()
    except (KeyboardInterrupt, SystemExit):
        pass
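
# Note: this snippet reads tornado.options.options.interval, .port and .reload,
# so the truncated part of the file presumably contains the matching
# tornado.options.define(...) calls. A sketch of what they might look like
# (names taken from the code above, defaults are assumptions):
#
#     tornado.options.define("port", default=8888, type=int)
#     tornado.options.define("interval", default=60, type=int)
#     tornado.options.define("reload", default=False, type=bool)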
Exemple #42
0
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = None
        self.pid = os.getpid()
        self.showlist = []

        try:
            self.tz = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
        except Exception:
            self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quiet = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version_string = None

        self.naming_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02d E%(episodenumber)02d")
        self.sports_ep_type = ("%(seasonnumber)dx%(episodenumber)02d",
                               "s%(seasonnumber)02de%(episodenumber)02d",
                               "S%(seasonnumber)02dE%(episodenumber)02d",
                               "%(seasonnumber)02dx%(episodenumber)02d",
                               "S%(seasonnumber)02 dE%(episodenumber)02d")
        self.naming_ep_type_text = ("1x02", "s01e02", "S01E02", "01x02", "S01 E02",)
        self.naming_multi_ep_type = {0: ["-%(episodenumber)02d"] * len(self.naming_ep_type),
                                     1: [" - " + x for x in self.naming_ep_type],
                                     2: [x + "%(episodenumber)02d" for x in ("x", "e", "E", "x")]}
        self.naming_multi_ep_type_text = ("extend", "duplicate", "repeat")
        self.naming_sep_type = (" - ", " ")
        self.naming_sep_type_text = (" - ", "space")

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.sys_encoding = get_sys_encoding()
        self.client_web_urls = {'torrent': '', 'newznab': ''}

        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = {}
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.scheduler = None
        self.wserver = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.event_queue = None
        self.version_updater = None
        self.show_updater = None
        self.tz_updater = None
        self.rsscache_updater = None
        self.daily_searcher = None
        self.failed_snatch_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None
        self.upnp_client = None
        self.oidc_client = None
        self.quicksearch_cache = None

    def start(self):
        self.started = True
        self.io_loop = IOLoop.current()

        # thread name
        threading.currentThread().setName('CORE')

        # patch modules with encoding kludge
        patch_modules()

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.scheduler = TornadoScheduler()
        self.wserver = WebServer()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.event_queue = EventQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.tz_updater = TimeZoneUpdater()
        self.rsscache_updater = RSSCacheUpdater()
        self.daily_searcher = DailySearcher()
        self.failed_snatch_searcher = FailedSnatchSearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()
        self.upnp_client = UPNPClient()
        self.quicksearch_cache = QuicksearchCache()

        # setup oidc client
        realm = KeycloakRealm(server_url='https://auth.sickrage.ca', realm_name='sickrage')
        self.oidc_client = realm.open_id_connect(client_id='sickrage-app',
                                                 client_secret='5d4710b2-ca70-4d39-b5a3-0705e2c5e703')

        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            self.log.info("Restoring SiCKRAGE backup: {}!".format(("FAILED", "SUCCESSFUL")[success]))
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.move_file(os.path.join(self.data_dir, 'sickrage.db'),
                                  os.path.join(self.data_dir, '{}.bak-{}'
                                               .format('sickrage.db',
                                                       datetime.datetime.now().strftime(
                                                           '%Y%m%d_%H%M%S'))))

            helpers.move_file(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                              os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quiet

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # set torrent client web url
        torrent_webui_url(True)

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting disk space: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

            # upgrade database
            db.upgrade()

        # compact main database
        if self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('schedule', 'history', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generate_secret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.failed_snatch_age < self.config.min_failed_snatch_age:
            self.config.failed_snatch_age = self.config.min_failed_snatch_age
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq,
            ),
            name=self.version_updater.name,
            id=self.version_updater.name
        )

        # add network timezones updater job
        self.scheduler.add_job(
            self.tz_updater.run,
            IntervalTrigger(
                days=1,
            ),
            name=self.tz_updater.name,
            id=self.tz_updater.name
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name=self.show_updater.name,
            id=self.show_updater.name
        )

        # add rss cache updater job
        self.scheduler.add_job(
            self.rsscache_updater.run,
            IntervalTrigger(
                minutes=15,
            ),
            name=self.rsscache_updater.name,
            id=self.rsscache_updater.name
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.daily_searcher.name,
            id=self.daily_searcher.name
        )

        # add failed snatch search job
        self.scheduler.add_job(
            self.failed_snatch_searcher.run,
            IntervalTrigger(
                hours=1,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name=self.failed_snatch_searcher.name,
            id=self.failed_snatch_searcher.name
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name=self.backlog_searcher.name,
            id=self.backlog_searcher.name
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name=self.auto_postprocessor.name,
            id=self.auto_postprocessor.name
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={
                    '15m': 15,
                    '45m': 45,
                    '90m': 90,
                    '4h': 4 * 60,
                    'daily': 24 * 60
                }[self.config.proper_searcher_interval]
            ),
            name=self.proper_searcher.name,
            id=self.proper_searcher.name
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name=self.trakt_searcher.name,
            id=self.trakt_searcher.name
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name=self.subtitle_searcher.name,
            id=self.subtitle_searcher.name
        )

        # add upnp client job
        self.scheduler.add_job(
            self.upnp_client.run,
            IntervalTrigger(
                seconds=self.upnp_client._nat_portmap_lifetime
            ),
            name=self.upnp_client.name,
            id=self.upnp_client.name
        )

        # add namecache update job
        self.scheduler.add_job(
            self.name_cache.build_all,
            IntervalTrigger(
                days=1,
            ),
            name=self.name_cache.name,
            id=self.name_cache.name
        )

        # start scheduler service
        self.scheduler.start()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()
        self.event_queue.start()

        # fire off startup events
        self.event_queue.fire_event(self.name_cache.build_all)
        self.event_queue.fire_event(self.version_updater.run)
        self.event_queue.fire_event(self.tz_updater.run)

        # start webserver
        self.wserver.start()

        # launch browser window
        if all([not sickrage.app.no_launch, sickrage.app.config.launch_browser]):
            self.event_queue.fire_event(lambda: launch_browser(('http', 'https')[sickrage.app.config.enable_https],
                                                               sickrage.app.config.web_host,
                                                               sickrage.app.config.web_port))

        # start ioloop
        self.io_loop.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            if self.wserver:
                self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # shutdown event queue
            if self.event_queue:
                self.log.debug("Shutting down event queue")
                self.event_queue.shutdown()
                del self.event_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            if self.log:
                self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

        if self.io_loop:
            self.io_loop.stop()

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.save_to_db()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist and quicksearch cache with shows and episodes from the database
        """

        self.quicksearch_cache.load()

        for dbData in self.main_db.all('tv_shows'):
            show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))

            try:
                self.log.debug("Loading data for show: [{}]".format(show.name))
                self.showlist.append(show)
                self.quicksearch_cache.add_show(show.indexerid)
            except Exception as e:
                self.log.debug("Show error in [%s]: %s" % (show.location, str(e)))
Exemple #43
0
class ScheduleServer():
    def __init__(self):
        tornado.options.parse_command_line()
        application = tornado.web.Application(
                [
                    (r'/', IndexPageHandler),

                    # --- PROJECT ---  #
                    # List of project summary.
                    (r'/api/v0/projects', apih.ListProjectSummaryHandler),
                    # Details of a project. Args: project id
                    (r'/api/v0/project/([^/]+)/details', apih.ProjectDetailsHandler),
                    # List of details of resources. Args: project id
                    (r'/api/v0/project/([^/]+)/resource/details', apih.ProjectResourceDetailsHandler),
                    # List of language status. Args: project id 
                    (r'/api/v0/project/([^/]+)/translation/status', apih.ProjectTranslationStatusHandler),

                    # --- JOB --- #
                    # List of jobs.
                    (r'/api/v0/jobs', apih.ListJobSummaryHandler),
                    # Summary of a job. Args: job id
                    (r'/api/v0/job/([^/]+)', apih.JobSummaryHandler),
                    # Execute a job. Args: job id
                    (r'/api/v0/job/([^/]+)/exec', apih.JobExecutionHandler),
                    # Details of a job. Args: job id
                    (r'/api/v0/job/([^/]+)/details', apih.JobDetailsHandler),
                    # List of resource file path and slug. Args: job id 
                    (r'/api/v0/job/([^/]+)/resource/slugs', apih.JobResourceSlugsHandler),
                    # List of resource name (in translation platform) and slug. Args: job id
                    (r'/api/v0/job/([^/]+)/translation/slugs', apih.JobTranslationSlugsHandler),
                    # Sync status (only for ResourceUploaderJob or TranslationUploaderJob). Args: job id
                    (r'/api/v0/job/([^/]+)/sync/status', apih.JobSyncStatusHandler),
                    # Job execution status. Args: job id
                    (r'/api/v0/job/([^/]+)/exec/status', apih.JobExecStatusHandler),

                    # maybe /job/([^/]+)/log/context/3  (limit = 3) might be useful

                    # --- CONFIGURATION --- #
                    # not using now but keep for a while   
                    # (r'/api/v0/config/([^/]+)/([^/]+)', apih.ConfigurationHandler), # job id, 'key' in config file
                    # Contents of project configuration file. Args: project id
                    # (r'/api/v0/config/project/([^/]+)', apih.ProjectConfigurationHandler),
                    # Contents of job configuration file. Args: job id
                    # (r'/api/v0/config/job/([^/]+)', apih.JobConfigurationHandler),
                    # Contents of resource configuration file. Args: resource configuration file name
                    (r'/api/v0/config/resource/([^/]+)', apih.ResourceConfigurationHandler),
                    # Contents of translation configuration file. Args: translation configuration file name
                    (r'/api/v0/config/translation/([^/]+)', apih.TranslationConfigurationHandler),

                    #--- LOG (JOB EXECUTION LOG) --- #
                    # Log context. Args: log path
                    (r'/api/v0/log/([^/]+)/context', apih.LogContextHandler),

                    # --- LOCAL FILES --- #
                    # List local repositories (git repository).
                    (r'/api/v0/local/repositories', apih.ListLocalRepositoriesHandler),
                    # List branches of specified local git repository. Args: repository name
                    (r'/api/v0/local/repository/([^/]+)/branches', apih.ListLocalRepositoryBranchesHandler),
                    # List local files under specified directory. Args: repository name, relative path in the repository
                    (r'/api/v0/local/repository/([^/]+)/files/([^/]+)', apih.ListLocalRepositoryFilesHandler),

                    # --- RESOURCE REPOSITORY --- #
                    # List of repositories. Args: platform name
                    #(r'/api/v0/resource/([^/]+)/repositories', apih.ListResourceRepositoriessHandler),

                    #--- TRANSLATION REPOSITORY --- #
                    # List of projects. Args: platform name
                    (r'/api/v0/translation/([^/]+)/projects', apih.ListTranslationProjectsHandler),
                    # Project details. Args: platform name, project id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/details', apih.TranslationProjectDetailsHandler),
                    # Resource details. Args: platform name, project id, resource id
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/details', apih.TranslationResourceDetailsHandler),
                    # All strings for a language. Args: platform name, project id, resource id, language code
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/strings', apih.TranslationTranslationStringsHandler),
                    # Details for a translation string. Args: platform name, project id, resource id, source string id
                    # (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/translation/([^/]+)/details', apih.TranslationTranslationStringDetailsHandler),
                    # Details for a source string. Args: platform name, project id, resource id, source string id (key)
                    (r'/api/v0/translation/([^/]+)/project/([^/]+)/resource/([^/]+)/source/([^/]+)/details', apih.TranslationSourceStringDetailsHandler)
                ],
                template_path = os.path.join(os.path.dirname(__file__), 'templates'),
                static_path = os.path.join(os.path.dirname(__file__), 'static')
        )
        self.http_server = tornado.httpserver.HTTPServer(application)

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }
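        # APScheduler executor setup: jobs run on the 20-worker thread pool by
        # default, and a separate 5-worker process pool is registered under the
        # 'processpool' alias for jobs scheduled with executor='processpool'.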

        logger.info("Initializing scheduler (pid: {}, port: '{}')...".format(os.getpid(), settings.HTTP_PORT))
        self.scheduler = TornadoScheduler()
        self.scheduler.configure(executors = executors)
        self._restore_jobs()
        self.scheduler.start()
        logger.info(self.scheduler.print_jobs())

    def _restore_jobs(self):
        global job
        configs = job.get_configuration(status='active')
        total = 0
        for c in configs:
            o = SchedulerJob(c)
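            # misfire_grace_time=600 lets a run that missed its scheduled cron time
            # (e.g. because the server was down) still fire if it is no more than
            # 600 seconds late.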
            self.scheduler.add_job(o.execute, 'cron', month=c.month, day=c.day, day_of_week=c.day_of_week, hour=c.hour, minute=c.minute, name=c.name, id=c.id, misfire_grace_time = 600)
            total += 1
        logger.info("Restored '{}' jobs.".format(total))

    # @classmethod
    def start(self):
        signal.signal(signal.SIGINT, self._signal_handler)
        self.http_server.listen(settings.HTTP_PORT)
        tornado.ioloop.IOLoop.current().start()

    def _signal_handler(self, signal_type, frame):
        if signal_type == signal.SIGINT:
            logger.info('SIGINT')
        else:
            logger.warning('Unknown signal')
        self.terminate()

    # @classmethod
    def terminate(self):
        logger.info('Stopping scheduler...')
        self.scheduler.shutdown()
        tornado.ioloop.IOLoop.current().stop()
        sys.exit(0)