Example #1
 def task_schedule(self):
     scheduler = BlockingScheduler()
     try:
         scheduler.add_job(self._get_task, 'interval', seconds=30)
         scheduler.start()
     except Exception as e:
         print(e)
Example #2
    def run(self):
        setup_logging()
        log = logging.getLogger('hermes_cms.service.runner')

        while True:
            try:
                config = Registry().get(self.config_file)
            # pylint: disable=broad-except
            except Exception as e:
                log.exception(e)
                continue  # without a config, the rest of the loop body would fail

            module_name = config['jobs'][self.name]['module_name']
            class_name = config['jobs'][self.name]['class_name']

            mod = __import__(module_name, fromlist=[class_name])
            service_class = getattr(mod, class_name)

            job_class = service_class(self.name, self.region, config)

            seconds = int(config['jobs'][self.name]['frequency'])

            scheduler = BlockingScheduler()
            scheduler.add_job(job_class.do_action, IntervalTrigger(seconds=seconds))
            log.info('Starting Scheduled job %s', self.name)
            scheduler.start()
Example #3
class PeriodGather(object):
    def __init__(self, db): 
        self.__db = db
        self.__scheduler = BlockingScheduler()
        self.__scheduler.add_job(self.gather, 'cron', day_of_week='mon-fri', hour=16, minute=30)

    def start(self):
        self.__scheduler.start()

    def gather(self):
        _logger.info('period gather stock basic and history data, begin.....')
        try:
            StockBasicCollector(self.__db).collect()
            stock_list = self.__get_stock_list()
            for stock in stock_list:
                HistDataCollector(stock, self.__db).collect()
        except Exception as e:
            _logger.exception(e)
        _logger.info('period gather stock basic and history data, end.....')
    
    def __get_stock_list(self):
        collection = Collection(Constants.BASIC_COLLECTION, self.__db)
        stock_infos = collection.find()
        stock_list = []
        for stock_info in stock_infos:
            stock_list.append(stock_info['code'])
        return stock_list

    def stop(self):
        if self.__scheduler:
            self.__scheduler.shutdown()
Example #4
class TalosCollectorCron(object):
    """docstring for talosCollectorCron"""
    def __init__(self, redis_conn,config_path):
        super(TalosCollectorCron, self).__init__()
        self.redis_conn  = redis_conn
        self.config_path = config_path
        self.scheduler = BlockingScheduler()
        json_config    = open(config_path, 'r')
        # print json_config.read()
        self.jsons = json.loads(json_config.read())

    def myjob(self, c):
        # collect the date, host info and analyzer for this command
        if c['enable']:
            tmp = os.popen(c['cmd']).readlines()
            data = {}
            data['content']  = tmp
            data['analyzer'] = c['analyzer']
            data['host']     = socket.gethostname()
            data['date']     = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            data['param']    = c['param']
            self.redis_conn.publish('talos:q:cmd', json.dumps(data))

    def start(self):
        for c in self.jsons:
            cron = c['time'].split(' ')
            job = self.scheduler.add_job(self.myjob,args=[c],trigger='cron', year=cron[5], month=cron[4], day=cron[3], hour=cron[2], minute=cron[1], second=cron[0])
        print('TalosCollectorCron Start..')
        self.scheduler.start()
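Example #4 maps a six-field cron string of the form 'second minute hour day month year' onto add_job keyword arguments by hand. A minimal sketch of the same idea packaged as a helper that returns an explicit CronTrigger (the field order is this snippet's own convention, not standard crontab order, and cron_string_to_trigger is a hypothetical name):

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger

def cron_string_to_trigger(spec):
    # Map 'second minute hour day month year' onto CronTrigger fields
    second, minute, hour, day, month, year = spec.split(' ')
    return CronTrigger(year=year, month=month, day=day,
                       hour=hour, minute=minute, second=second)

scheduler = BlockingScheduler()
# Fire at second 0 of every fifth minute
scheduler.add_job(lambda: print('fired'), cron_string_to_trigger('0 */5 * * * *'))
scheduler.start()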
Example #5
class BGPTableDownload(basesinfonierspout.BaseSinfonierSpout):

    def __init__(self):

        basesinfonierspout.BaseSinfonierSpout.__init__(self)

    def useropen(self):
        
        self.interval = int(self.getParam("frequency"))        
        
        self.sched = BlockingScheduler()
        self.sched.add_job(self.job, "interval", seconds=self.interval, id="bgptable")
        self.sched.start()

    def usernextTuple(self):

        pass
        
    def job(self):
        
        query = "http://bgp.potaroo.net/v6/as2.0/bgptable.txt"
        self.log(query)
        headers = {
            "User-Agent" : "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0"
        }
        r = requests.get(query, headers=headers)
        self.emit()
Example #6
def test5():
    """Run a job at a fixed date/time, then shut down the scheduler."""
    sched = BlockingScheduler()
    sched.add_job(my_job, 'date', run_date=datetime(2016, 8, 16, 12, 34, 5), args=('123',), id='my_job_id')
    # The second argument to add_job is the trigger, which controls how the job
    # is scheduled. It can be 'date', 'interval' or 'cron'.
    sched.start()
    print('scheduled job')
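As the comment in Example #6 notes, add_job's second argument selects the trigger, which can be 'date', 'interval' or 'cron'. A minimal sketch showing all three forms side by side (my_job here is a stand-in function):

from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def my_job(text):
    print(text)

sched = BlockingScheduler()
# 'date': run once at a specific moment
sched.add_job(my_job, 'date', run_date=datetime(2030, 1, 1, 12, 0, 0), args=('once',))
# 'interval': run repeatedly at a fixed spacing
sched.add_job(my_job, 'interval', seconds=30, args=('every 30 seconds',))
# 'cron': run on matching calendar fields, crontab-style
sched.add_job(my_job, 'cron', hour=8, minute=15, args=('daily at 08:15',))
sched.start()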
Example #7
def test1():
    """Run a job on a fixed interval."""
    start_time = time.time()
    sched = BlockingScheduler()
    sched.add_job(my_job, 'interval', args=('123',), seconds=1, id='my_job_id')  # run my_job every second; args are my_job's arguments; id is optional
    sched.start()  # blocks here and does not return; Ctrl+C terminates the scheduler
    print('never reached while the scheduler is running')
Example #8
def main(argv):
	if len(argv) > 1:
		#initialize some variables
		pass
	scheduler = BlockingScheduler()
	scheduler.add_job(link, "interval", hours=1, id="link_job")
	scheduler.start()
Example #9
class CrawlScheduler(object):

    def __init__(self, crawler):
        self.crawler = crawler
        self.scheduler = BlockingScheduler()

    def start(self):
        logging.info('=============================================')
        logging.info('[{0}] Start crawling from Instagram...'.format(datetime.datetime.now()))
        crawling_start_time = time.time()
        self.crawler.crawl()
        crawling_end_time = time.time()
        time_spent = int(crawling_end_time - crawling_start_time)
        logging.info('Time spent: {0}min {1}s'.format(time_spent // 60, time_spent % 60))
        logging.info('=============================================')

    @staticmethod
    def get_nearest_start_time():
        nearest_start_timestamp = int(time.time() / (60 * 15) + 1) * 60 * 15
        return datetime.datetime.fromtimestamp(nearest_start_timestamp)

    def start_scheduler(self, should_continue=False):
        # Config logging and alarm.
        logging.basicConfig(filename=self.crawler.get_crawl_log(), level=logging.DEBUG)

        scheduler_start_time = self.get_nearest_start_time()
        redis_client.set(self.crawler.get_redis_end_time_key(), str(scheduler_start_time))
        if not should_continue:
            redis_client.set(self.crawler.get_redis_start_time_key(),
                             str(scheduler_start_time - datetime.timedelta(minutes=14, seconds=59)))
        self.scheduler.add_job(self.start, 'interval', start_date=scheduler_start_time, minutes=15, misfire_grace_time=600)
        self.scheduler.start()
Example #10
def go_sched():

	sched = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)

	startTime = datetime.datetime.now()+datetime.timedelta(seconds=10)
	scheduleJobs(sched, startTime)
	sched.start()
Example #11
class PeriodicRetrievalManager(RetrievalManager):
    """
    Manages the periodic retrieval of updates.
    """
    def __init__(self, retrieval_period: TimeDeltaInSecondsT, update_mapper: UpdateMapper,
                 logger: Logger=PythonLoggingLogger()):
        """
        Constructor.
        :param retrieval_period: the period that dictates the frequency at which data is retrieved
        :param update_mapper: the object through which updates can be retrieved from the source
        :param logger: log recorder
        """
        super().__init__(update_mapper, logger)
        self._retrieval_period = retrieval_period
        self._running = False
        self._state_lock = Lock()
        self._updates_since = None  # type: datetime

        self._scheduler = BlockingScheduler()
        self._scheduler.add_job(self._do_periodic_retrieval, "interval", seconds=self._retrieval_period, coalesce=True,
                                max_instances=1, next_run_time=datetime.now())

    def run(self, updates_since: datetime=datetime.min):
        self._updates_since = localise_to_utc(updates_since)

        with self._state_lock:
            if self._running:
                raise RuntimeError("Already running")
            self._running = True
        self._scheduler.start()

    def start(self, updates_since: datetime=datetime.min):
        """
        Starts the periodic retriever in a new thread. Cannot start if already running.
        :param updates_since: the time from which to get updates from (defaults to getting all updates).
        """
        Thread(target=self.run, args=(updates_since, )).start()

    def stop(self):
        """
        Stops the periodic retriever.
        """
        with self._state_lock:
            if self._running:
                self._scheduler.shutdown(wait=False)
                self._running = False
                logging.debug("Stopped periodic retrieval manager")

    def _do_periodic_retrieval(self):
        assert self._updates_since is not None
        updates = self._do_retrieval(self._updates_since)

        if len(updates) > 0:
            # Next time, get all updates since the most recent that was received last time
            self._updates_since = updates.get_most_recent()[0].timestamp
        else:
            # No updates were received, so `self._updates_since` is left unchanged. There is
            # no risk of duplicates, as the previously queried range returned nothing, and
            # not advancing the timestamp keeps things simple.
            pass
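Example #11 passes coalesce=True and max_instances=1 so retrievals neither pile up after a stall nor overlap each other. A minimal sketch of those job options, together with misfire_grace_time, on a plain interval job (slow_job and the numbers are illustrative):

import time
from apscheduler.schedulers.blocking import BlockingScheduler

def slow_job():
    time.sleep(5)  # may run longer than the scheduling interval

scheduler = BlockingScheduler()
scheduler.add_job(
    slow_job, 'interval', seconds=2,
    coalesce=True,          # collapse a backlog of missed runs into a single run
    max_instances=1,        # never run two instances of the job concurrently
    misfire_grace_time=30,  # still fire if up to 30s late, otherwise skip
)
scheduler.start()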
Example #12
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.oncescheduler=BlockingScheduler()
        self.scheduler.start()
    def start(self):
        self.oncescheduler.start()
    def addschedule(self, event=None, day_of_week='0-6', hour='11', minute='57', second='0', id='', type='cron', run_date='', args=None):
        if id == '':
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        if type == 'date':
            if run_date == '':
                self.oncescheduler.add_job(event, args=args)
            else:
                self.oncescheduler.add_job(event, 'date', run_date=run_date, args=args)
        elif type == 'back':
            self.oncescheduler.add_job(event, 'cron', day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
        else:
            self.scheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
    def removeschedule(self,id):
        self.scheduler.remove_job(id)
Example #13
def run():
    sched = BlockingScheduler()
    sched.add_job(main.run, "cron", hour="7,11,17")

    try:
        sched.start()
    except KeyboardInterrupt:
        pass
Example #14
 def task_schedule(self):
     scheduler = BlockingScheduler()
     try:
         scheduler.add_job(self._get_task, 'cron', day='1-31', hour=self.sche_time[0],
                           minute=self.sche_time[1], second=self.sche_time[2])
         scheduler.start()
     except Exception as e:
         print(e)
Example #15
def main():
    """Run tick() at the interval of every ten seconds."""
    scheduler = BlockingScheduler(timezone=utc)
    scheduler.add_job(tick, 'interval', seconds=10)
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #16
def cronjob():
    scheduler = BlockingScheduler()
    print "*******"
    scheduler.add_job(checkupdate,'cron', second='0', hour='2',minute='0')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #17
def startCrawlerTask():
    from apscheduler.schedulers.blocking import BlockingScheduler
    scheduler = BlockingScheduler()
    scheduler.add_job(crawlerTask, 'cron', second='0',minute='15', hour='8')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #18
def main():
    sched = BlockingScheduler()
    sched.add_job(spider.spider, 'interval', seconds=21600)
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #19
def main():
    scheduler = BlockingScheduler()
    scheduler.add_job(kick_off_script, 'interval', seconds=60)
    print('Press Ctrl+C to exit')

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #20
    def run(self):
        scheduler = BlockingScheduler()
        cron_jobs = [NotificationJob(), TransferJob()]

        for cron_job in cron_jobs:
            trigger = cron_job.trigger()
            scheduler.add_job(cron_job.run, **trigger)
        logger.info('running CronJobTaskRunner')
        scheduler.start()
Example #21
def schedule_task():
    
    sched = BlockingScheduler()
    @sched.scheduled_job('interval', hours=1)
    def timed_job():
        notifications = session.query(Notification).all()
        for n in notifications:
            send_text(n)
        print("ran job")
    sched.start()
Example #22
def schedJobs(funcToRun):
    logging.basicConfig()
    scheduler = BlockingScheduler()
    datenow = datetime.datetime.now()
    print("main scheduler started for jobs @" + str(datenow))

#    rs = scheduler.add_job(subtwo, 'interval', id="MainTaskid", name="maintask", start_date=datetime.datetime.now(), seconds=3, jobstore='default')
    rs = scheduler.add_job(funcToRun, trigger="interval", id="mainSchedJobID", name="mainSchedJob", jobstore='default', executor='default', replace_existing=False, minutes=mainSchedJobsInterval)
    print("Running Tasks")
    scheduler.start()
Example #23
    def run(self):
        self.setup_logging()

        scheduler = BlockingScheduler()
        weather = Weather(scheduler, zip=self._args['zip'], station=self._args['station'])
        dimmer = Dimmer(scheduler)
        display = Display(weather, dimmer)

        display.start()
        scheduler.start()
Example #24
def test7():
    """Run a job on a schedule; terminate with Ctrl+C."""
    scheduler = BlockingScheduler()
    scheduler.add_job(tick, 'interval', seconds=1)
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        scheduler.start(paused=False)  # blocks here; with paused=True, jobs are not processed until the scheduler is resumed
    except (KeyboardInterrupt, SystemExit):
        pass
    print('ok')
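As the comments in Example #24 suggest, BlockingScheduler.start() occupies the calling thread, which is why the final print('ok') only runs after the scheduler stops. When the program needs to keep doing other work, APScheduler's BackgroundScheduler runs jobs on a background thread instead; a minimal sketch:

import time
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

scheduler = BackgroundScheduler()
scheduler.add_job(tick, 'interval', seconds=1)
scheduler.start()  # returns immediately; jobs run on a background thread

try:
    while True:
        time.sleep(2)  # the main thread stays free for other work
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()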
Example #25
def main():
    ceilometer = create_ceilomenter_client()
    resources = ceilometer.resources.list()
    for i in resources:
        # print('\n')
        print(i.resource_id)
    # Run this job at a fixed interval, with parameter 'test'
    sched = BlockingScheduler()
    sched.add_job(my_job, 'interval', seconds=5, args=['test'])
    sched.start()
Example #26
def bob_job():

    me, password = email_login()
    sched = BlockingScheduler()

    @sched.scheduled_job('cron', day_of_week='mon,tue,wed,thu,fri', hour=17)
    def scheduled_job():
        job.run(me, password)

    sched.start()    
Example #27
def startschedule():
    scheduler = BlockingScheduler()
    scheduler.add_job(test,'cron', second='*/3', hour='*')    
    scheduler.add_job(dotraveljobs,'cron', second='*/3', hour='*')
    scheduler.add_job(doindexjobs,'cron', second='*/3', hour='*')
    scheduler.add_job(docontentjobs,'cron', second='*/3', hour='*')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()     
Example #28
def main():
    repo_slugs = ['start-jsk/jsk_apc']
    gh_repos_handler = GitHubReposHandler(repo_slugs)
    scheduler = BlockingScheduler(logger=logger)
    scheduler.add_job(gh_repos_handler.send_empty_pr,
                      trigger='interval', minutes=5)
    scheduler.add_job(gh_repos_handler.close_ci_success_empty_pr,
                      trigger='interval', minutes=5)
    scheduler.print_jobs()
    scheduler.start()
Example #29
def main(generate_once, minutes):
    generate()

    if not generate_once:
        print('Starting schedule, every {} minutes'.format(minutes))
        scheduler = BlockingScheduler()
        scheduler.add_job(generate, 'interval', minutes=minutes)
        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            pass
Example #30
def main():
	scheduler = BlockingScheduler()
	routine = Routine()
	job = scheduler.add_job(routine.check_fresh_dd, 'interval', minutes=1)
	try:
		scheduler.start()
	except (KeyboardInterrupt, SystemExit):
		pass

# if __name__ == "__main__":
# 	main()
Example #31
# coding=utf-8
# @Author  : Fcvane
# @Param   :
# @File    : 8.py
"""
Demonstrates how to use the background scheduler to schedule a job that executes on 3 second
intervals.
"""

from datetime import datetime
import os

from apscheduler.schedulers.blocking import BlockingScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_job(tick, 'date', run_date='2018-08-06 17:00:05')

    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()  # blocking mode: a single dedicated thread runs the scheduling loop
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
        print('Exit The Job!')
Example #32
    def plantaskdsfsdfsafdsafsd(self,
                                func,
                                targger="cron",
                                args=None,
                                year=None,
                                month=None,
                                week="*",
                                day_of_week='*',
                                day=None,
                                hour=None,
                                minute=None,
                                second='0'):
        """添加定时器任务、计划任务

        func 函数名

        args 函数参数  例:(1,)

        year 年

        month 月

        day 日

        hour 几点

        minute 几分

        second 几秒
        """
        BlockingSchedulers = BlockingScheduler()
        if targger == 'cron':
            BlockingSchedulers.add_job(func,
                                       targger,
                                       args=args,
                                       year=year,
                                       month=month,
                                       week=week,
                                       day_of_week=day_of_week,
                                       day=day,
                                       hour=hour,
                                       minute=minute,
                                       second=second)
        elif targger == 'interval':
            if day:
                BlockingSchedulers.add_job(func,
                                           targger,
                                           args=args,
                                           days=int(day))
            elif hour:
                BlockingSchedulers.add_job(func,
                                           targger,
                                           args=args,
                                           hours=int(hour))
            elif minute:
                BlockingSchedulers.add_job(func,
                                           targger,
                                           args=args,
                                           minutes=int(minute))
            elif second:
                BlockingSchedulers.add_job(func,
                                           targger,
                                           args=args,
                                           seconds=int(second))
        try:
            BlockingSchedulers.start()
        except (KeyboardInterrupt, SystemExit):
            BlockingSchedulers.shutdown()
Example #33
def run():
    main()
    sch = BlockingScheduler()
    sch.add_job(main, 'interval', minutes=30)  # crawl every 30 minutes
    sch.start()
Example #34
class TiebaSign:
    """Tieba check-in."""
    JOB_STORE_ERROR_RETRY = 'error_retry'
    """Jobstore alias used for retry-on-failure jobs."""
    def __init__(self):
        self.config_file = 'ignore/sign.conf'
        self.log_file = 'ignore/sign.log'
        self.lock_file = 'ignore/sign.lock'

        self.logger: logging.Logger = None
        self.scheduler: BlockingScheduler = None

        self.sign_url = ''
        """Check-in URL"""

        self.sign_hour = ''
        """Hour(s) at which check-in fires; used as the `hour` argument of the add_job cron trigger"""

        self.error_retry_times = 0
        """Current number of error retries"""

        self.max_error_retry_times = 5
        """Maximum number of retries"""

    def init_logger(self):
        if self.logger is None:
            self.logger = self.get_logger(self.log_file)

    @staticmethod
    def get_logger(file):
        file_handler = logging.FileHandler(file, encoding='utf-8')

        fmt = '%(asctime)s %(message)s'
        date_fmt = "%Y-%m-%d %H:%M:%S"
        formatter = logging.Formatter(fmt=fmt, datefmt=date_fmt)
        file_handler.setFormatter(formatter)

        logger = logging.getLogger('sign')
        logger.addHandler(file_handler)
        logger.setLevel(logging.INFO)
        return logger

    def init_config(self):
        """Initialize configuration."""
        config = configparser.ConfigParser()
        config.read(self.config_file)
        if 'sign' in config:
            sign = config['sign']
            if 'sign_url' in sign:
                self.sign_url = sign['sign_url']
            if 'sign_hour' in sign:
                self.sign_hour = sign['sign_hour']

    def check_lock(self):
        """
        Check the lock file.
        If it exists, refresh its timestamp.
        If it does not exist, stop the job.
        """
        if os.path.exists(self.lock_file):
            self.write_lock()
        else:
            # The file does not exist, so stop the program
            if self.scheduler is not None:
                self.scheduler.shutdown(wait=False)
                self.log(f'{self.lock_file} does not exist; stopping the job')
                exit(1)

    def write_lock(self):
        """更新锁文件记录的时间"""
        with open(self.lock_file, 'w') as f:
            message = time.strftime('Now is %Y%m%d %H:%M:%S %z.')
            if self.scheduler:
                jobs = self.scheduler.get_jobs()
                if jobs:
                    for job in jobs:
                        message += f'\n{job}'
            f.write(message)

    def sign(self):
        """Check in."""
        error_message = ''  # empty on success, the error text on failure
        try:
            result = requests.get(self.sign_url)
            result = result.json()
            sign_message = self.get_sign_message(result)
            if sign_message:
                self.log('Response: ' + sign_message)
                if not self.check_sign_message(sign_message):
                    error_message = sign_message  # treat as an error
            else:
                error_message = str(result)  # treat as an error
        except Exception as e:
            error_message = str(e)
        if not error_message:
            self.error_retry_times = 0  # reset on success
        else:
            # Remove any pending retry first
            self.scheduler.remove_all_jobs(TiebaSign.JOB_STORE_ERROR_RETRY)
            self.error_retry_times += 1  # increment on failure
            if self.error_retry_times > self.max_error_retry_times:
                self.log(
                    f'Already failed {self.error_retry_times} times; giving up: {error_message}')
                self.error_retry_times = 0
            else:
                self.log(
                    f'Request failed; retry #{self.error_retry_times} in 10 minutes: {error_message}'
                )
                run_date = datetime.datetime.now() + datetime.timedelta(
                    minutes=10)
                self.scheduler.add_job(
                    self.sign,
                    trigger='date',
                    run_date=run_date,
                    jobstore=TiebaSign.JOB_STORE_ERROR_RETRY)

    def check_sign_message(self, sign_message: str) -> bool:
        """Check whether the check-in result was a success."""
        if not sign_message:
            return False
        # [check-in] forum accounts 1/1/1+0, [reply] users 1/1/1+0
        # [check-in] forum accounts 1/1/0+1, Baidu accounts 6/6/6, [reply] users 1/1/0+1, Baidu accounts 3/3/0+3
        # [check-in] forum accounts 1/1/0+0, [reply] users 1/1/0+1, Baidu accounts 3/3/0+3
        all_match = re.findall(r'\[(.+?)\].+?(\d+)/(\d+)/(\d+)\+(\d+)',
                               sign_message)
        if not all_match:
            return False
        for m in all_match:
            name, all_count, need_count, pre_success, cur_success = m
            if int(need_count) != int(pre_success) + int(cur_success):
                self.log(f'{name} did not fully succeed; treating as failure')
                return False
        return True

    @staticmethod
    def get_sign_message(result):
        """
        获取返回结果的签到信息
        用于减少 log 字数
        """
        if result and isinstance(result, dict):
            if 'code' not in result.keys():
                return ''
            if result['code'] != 200:
                return ''
            if 'data' not in result.keys():
                return ''
            data = result['data']
            if data and isinstance(data, dict):
                if 'signMessage' in data.keys():
                    return data['signMessage']

    def start(self):
        """Start."""
        self.init_logger()
        self.init_config()
        if not self.sign_url:
            self.log(f'sign_url is not configured under [sign] in {self.config_file}')
            exit(1)
        if not self.sign_hour:
            self.log(f'sign_hour is not configured under [sign] in {self.config_file}')
            exit(1)
        # Write the lock file for the first time
        self.write_lock()

        # The server's timezone differs, so set it explicitly
        timezone = pytz.timezone('Asia/Shanghai')
        self.scheduler = BlockingScheduler(timezone=timezone)
        self.scheduler.add_jobstore('memory', TiebaSign.JOB_STORE_ERROR_RETRY)
        # Fire once per minute
        self.scheduler.add_job(self.check_lock, trigger='cron', second='0')
        # Once at 6 in the morning and once at 10 at night
        self.scheduler.add_job(self.sign, trigger='cron', hour=self.sign_hour)
        self.scheduler._logger = self.logger
        self.log('Starting')
        try:
            self.scheduler.start()
        except KeyboardInterrupt:
            self.log('Interrupted by keyboard')
        except SystemExit:
            self.log('System exit')

    def log(self, text):
        now = time.strftime('%Y%m%d %H:%M:%S %z')
        print(f'[{now}] {text}')
        self.logger.info(text)
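The retry logic in Example #34 is worth isolating: a failed run queues a one-shot 'date' job in a dedicated in-memory jobstore, so pending retries can be cleared with remove_all_jobs without touching the regular cron jobs. A stripped-down sketch of that pattern (do_work and the 10-minute delay are illustrative):

import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

RETRY_STORE = 'error_retry'
scheduler = BlockingScheduler()
scheduler.add_jobstore('memory', alias=RETRY_STORE)

def flaky_task():
    try:
        do_work()  # assumed helper; raises on failure
    except Exception:
        # Drop any previously queued retry, then queue a fresh one-shot retry
        scheduler.remove_all_jobs(jobstore=RETRY_STORE)
        run_date = datetime.datetime.now() + datetime.timedelta(minutes=10)
        scheduler.add_job(flaky_task, trigger='date', run_date=run_date,
                          jobstore=RETRY_STORE)

scheduler.add_job(flaky_task, trigger='cron', hour='6,22')
scheduler.start()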
Example #35
def dojob():
    job()
    scheduler = BlockingScheduler()
    scheduler.add_job(job, 'interval', seconds=3600, id='test_job1')
    # scheduler.add_job(spacejob(), 'interval', seconds=3600, id='test_job2')
    scheduler.start()
Example #36
class JobScheduler(object):
    def __init__(self, every=30, unit='second'):
        self.mongo = mongopool.get()
        self.cursor = self.mongo.get_database('apscheduler').get_collection(
            'jobs')
        self.every = every
        self.unit = unit
        self.scheduler = BlockingScheduler(logger=logger)
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=pytz.timezone('Asia/Saigon'))
        self._set_trigger(every, unit)

    def _set_trigger(self, every, unit):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        if unit == 'second':
            self.trigger = CronTrigger(second='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'minute':
            self.trigger = CronTrigger(minute='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'hour':
            self.trigger = CronTrigger(hour='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'day':
            self.trigger = CronTrigger(day='*/{}'.format(every),
                                       start_date=now)
        else:
            raise ValueError('Unknown time unit')

    def add_jobstore(self, jobstore, alias):
        self.scheduler.add_jobstore(jobstore, alias)

    def add_executor(self, executor, alias):
        self.scheduler.add_executor(executor, alias)

    def add_job(self,
                job_fn,
                id='id1',
                name='job1',
                jobstore='default',
                executor='default',
                args=None,
                kwargs=None):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        history = list(self.cursor.find({'_id': id}))
        if history:
            #TODO: process missing jobs
            self.cursor.delete_one({'_id': id})
        next_run_time = self.trigger.get_next_fire_time(None, now)
        if kwargs:
            kwargs['run_time'] = next_run_time
        else:
            kwargs = {'run_time': next_run_time}

        self.scheduler.add_job(job_fn,
                               trigger=self.trigger,
                               next_run_time=next_run_time,
                               id=id,
                               name=name,
                               jobstore=jobstore,
                               executor=executor,
                               args=args,
                               kwargs=kwargs)

    def remove_job(self, id, jobstore='default'):
        self.scheduler.remove_job(job_id=id, jobstore=jobstore)

    def callback(self, callback_fn, mark=EVENT_ALL):
        self.scheduler.add_listener(callback_fn, mark)

    def start(self):
        mongopool.put(self.mongo)
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()
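Example #36's callback method wraps add_listener, which lets code observe scheduler events through a bitmask of event types. A minimal sketch of listening for job success and failure (the mask constants come from apscheduler.events):

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from apscheduler.schedulers.blocking import BlockingScheduler

def on_job_event(event):
    # JobExecutionEvent carries the job id and, on failure, the exception
    if event.exception:
        print('job %s failed' % event.job_id)
    else:
        print('job %s ran fine' % event.job_id)

scheduler = BlockingScheduler()
scheduler.add_listener(on_job_event, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
scheduler.add_job(lambda: None, 'interval', seconds=5, id='noop')
scheduler.start()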
Example #37
class WorkInfoCollector :
	def __init__(self, cfg) :
		self.cfg 				= cfg
		self.WORKINFO_REPO 		= {}

		self._initConfig()

	def _initConfig(self) :
		self.systemName 		= self.cfg.get('MODULE_CONF', 'TACS_SYSTEM_NAME')
		self.workInfoBaseDir    = self.cfg.get('MODULE_CONF', 'TACS_WORKINFO_RAW')

		self.auditLogTempDir	= self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_TEMP')
		self.auditLogBaseDir	= self.cfg.get('MODULE_CONF', 'TACS_AUDITLOG_PATH')
		self.receivedWorkCode	= self.cfg.get('MODULE_CONF', 'RECEIVED_WORK_CODE')

		self.tangoWmWorkInfoUrl = self.cfg.get('MODULE_CONF', 'TANGO_WM_WORKINFO_URL')
		self.tangoWmEqpInfoUrl	= self.cfg.get('MODULE_CONF', 'TANGO_WM_EQPINFO_URL')
		self.xAuthToken			= self.cfg.get('MODULE_CONF', 'TANGO_WM_X_AUTH_TOKEN')

		self.host               = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_HOST')
		self.port               = int(self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PORT'))
		self.user               = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_USER')
		self.passwd             = self.cfg.get('MODULE_CONF', 'TANGO_WM_SFTP_PASSWD')

		self.scheduleInterval   = self.cfg.get('MODULE_CONF', 'SCHEDULE_INTERVAL_MIN')

		self.stdoutSleepTime	= int(self.cfg.get('MODULE_CONF', 'STDOUT_SLEEP_TIME'))

		self.headers = {'x-auth-token' : self.xAuthToken, 'Content-Type' : 'application/json; charset=utf-8'}
		self.migration			= False

		self.errFilePath		= self.cfg.get('MODULE_CONF', 'ERROR_FILE_PATH')
		self.searchStartDate	= None
		self.searchEndDate		= None
		self.migrationProcFlag 	= False

	def _executeMigration(self, searchStartDate, searchEndDate) :
		__LOG__.Trace('migration process start. searchStartDate({}), searchEndDate({})'.format(searchStartDate, searchEndDate))
		try :
			searchStartDateObj 	= datetime.strptime(searchStartDate, '%Y%m%d%H%M%S')
			searchEndDateObj	= datetime.strptime(searchEndDate, '%Y%m%d%H%M%S')
	
			if searchStartDateObj > searchEndDateObj :
				__LOG__.Trace('searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate))
				print('[ERROR] searchStartDate({}) bigger than searchEndDate({})'.format(searchStartDate, searchEndDate))
			else :
				# request workInfo
				workIdList = self._lookupWorkInfo(searchStartDate, searchEndDate, True)
				# request eqpInfo by workId
				self._lookupEqpInfo(workIdList)
		except Exception as ex :
			__LOG__.Trace('workInfo migration failed. {}'.format(ex))

	def _executeScheduler(self) :
		try :
			__LOG__.Trace('scheduler process start')
			# request workInfo
			workIdList = self._lookupWorkInfo()
			# request eqpInfo by workId
			self._lookupEqpInfo(workIdList)
		except :
			__LOG__.Exception()

	def _stdout(self, msg) :
		sys.stdout.write('stdout' + msg + '\n')
		sys.stdout.flush()
		__LOG__.Trace('stdout: %s' % msg)

	def createDateFile(self, fileName) :
		self._mkdirs(self.errFilePath)
		fullFilePath	= os.path.join(self.errFilePath, fileName)

		if not os.path.exists(fullFilePath) :
			__LOG__.Trace( 'Tango-WM Request Fail File Create : {}'.format(fullFilePath) )
			with open(fullFilePath, 'w') as dateFile :
				dateFile.write('')

	def _lookupWorkInfo(self, fromDate = None, toDate = None, migration = False) :
		self.searchStartDate = fromDate
		self.searchEndDate	= toDate

		if not migration :
			searchEndDateObj  	= datetime.now()
			#searchStartDateObj  = datetime(searchEndDateObj.year, searchEndDateObj.month, searchEndDateObj.day, searchEndDateObj.hour, (searchEndDateObj.minute - int(self.scheduleInterval)))
			searchStartDateObj	= searchEndDateObj - timedelta(minutes=1)

			self.searchStartDate = searchStartDateObj.strftime('%Y%m%d%H%M')
			self.searchEndDate = searchEndDateObj.strftime('%Y%m%d%H%M')
	
		__LOG__.Trace('lookup workInfo from({}) ~ to({})'.format(self.searchStartDate, self.searchEndDate))

		url = self.tangoWmWorkInfoUrl.format(self.systemName, self.searchStartDate, self.searchEndDate)
		__LOG__.Trace('request workInfo url: {}'.format(url))

		rawDict = self._requestGet(url)
		return self._loadWorkInfo(rawDict)

	def _lookupEqpInfo(self, workIdList) :
		if not workIdList :
			__LOG__.Trace('workIdList is empty')
		else :
			logDictList = list()
			yyyyMMdd	= None
			eventDate	= None

			for oneWorkId in workIdList :
				url = self.tangoWmEqpInfoUrl.format(self.systemName, oneWorkId)
				__LOG__.Trace('request eqpInfo url: {}'.format(url))

				rawDict = self._requestGet(url)
				logDict, yyyyMMdd, eventDate = self._loadEqpInfo(oneWorkId, rawDict, logDictList)
				logDictList.append(logDict)
			if rawDict :
				self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDictList)
			else :
				__LOG__.Trace('eqpInfo Dict is None {}'.format(rawDict))

	def _requestGet(self, url, verify = False) :
		rawDict 	= None
		response	= None

#		fromReRequestDate, toReRequestDate = self.readDateFile().split(',')

		try :
			response 	= requests.get(url = url, headers = self.headers, verify = verify)
			
			if response != None and response.status_code == 200 :
#				if not self.migrationProcFlag and not migration and fromReRequestDate != '0' and toReRequestDate != '0' :
#					self.migrationProcFlag = True

#					proc = subprocess.Popen(['sh', self.migrationPath, fromReRequestDate, toReRequestDate], stdout=subprocess.PIPE)
#					migrationResult, err = proc.communicate()

#					if 'ERROR' in migrationResult :
#						__LOG__.Trace('!!! Exception !!! migration Error : \n %s' % migrationResult)
#					else :
#						self.updateDateFile()

#					self.migrationProcFlag = False

				#jsonText = response.text.decode('string_escape')
				#__LOG__.Trace('raw response.text: {}'.format(jsonText))
				#__LOG__.Trace('replace response.text: {}'.format(jsonText.replace('\\\\\\"', '\\\"')))
				#__LOG__.Trace('replace response.text: {}'.format(jsonText))
				#tmpDict = json.loads(response.text)
				#__LOG__.Trace('tmpDict: {}'.format(tmpDict))
				#__LOG__.Trace('tmpDict.dumps: {}'.format(json.dumps(tmpDict, ensure_ascii=False)))
				rawDict = response.json()
				#rawDict = json.loads(jsonText)
			else :
				__LOG__.Trace('!!! Exception !!! requestGet failed. statusCode: {}'.format(response.status_code))
				self.createDateFile('{}_{}'.format(self.searchStartDate, self.searchEndDate))
				pass

		except :
			__LOG__.Exception()
			self.createDateFile('{}_{}'.format(self.searchStartDate, self.searchEndDate))
			pass

		return rawDict

	def _loadWorkInfo(self, rawDict) :
		if rawDict :
			__LOG__.Trace('workInfo rawData: {}'.format(rawDict))
			workIdList = []

			if type(rawDict['workInfo']) is list :
				for oneWorkInfo in rawDict['workInfo'] :
					workId = oneWorkInfo['workId']
					__LOG__.Trace('workId: {}'.format(workId))
					if workId is None or not workId :
						__LOG__.Trace('invalid workId({})'.format(workId))
						continue

					workIdList.append(workId)
															
					wrapper 			= {}
					wrapper['workInfo'] = oneWorkInfo
					
					workEvntDate = datetime.now().strftime('%Y%m%d%H%M%S')
					wrapper['workInfo']['workEvntDate'] = workEvntDate					

					self.WORKINFO_REPO[workId] = wrapper
				__LOG__.Trace('WORKINFO_REPO: {}'.format(self.WORKINFO_REPO))
			else :
				__LOG__.Trace('Unsupported type: {}'.format(type(rawDict['workInfo'])))
				pass

			return workIdList
		else :
			__LOG__.Trace('workInfo rawData is None')
			return None

	def _loadEqpInfo(self, oneWorkId, rawDict, logDictList) :
		logDict 	= dict()
		yyyyMMdd	= None
		eventDate	= None

		if rawDict :
			__LOG__.Trace('eqpInfo rawData: {}'.format(rawDict))
			if 'eqpInfo' in rawDict and type(rawDict['eqpInfo']) is list :
				scriptFileList = []
				wrapper = self.WORKINFO_REPO[oneWorkId]
				if wrapper : 
					wrapper['eqpInfo'] = rawDict['eqpInfo']
					for oneEqpInfoDict in rawDict['eqpInfo'] :
						if 'scriptInfo' in oneEqpInfoDict :
							scriptInfoList = oneEqpInfoDict['scriptInfo']
								
							if scriptInfoList :
								for oneScriptInfoDict in scriptInfoList :
									filePathname = oneScriptInfoDict['atchdPathFileNm']
									if filePathname :
										remoteFilepath, remoteFilename = os.path.split(filePathname)
										__LOG__.Trace('remoteFilepath({}), remoteFilename({})'.format(remoteFilepath, remoteFilename))
										scriptFileDict = {}
										scriptFileDict['remoteFilepath'] = remoteFilepath
										scriptFileDict['remoteFilename'] = remoteFilename

										scriptFileList.append(scriptFileDict)
									else :
										__LOG__.Trace('workId({})/eqpNm({}) atchdPathFileNm({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], filePathname))
										pass
							else :
								__LOG__.Trace('workId({})/eqpNm({}) scriptInfoList({}) is invalid'.format(oneWorkId, oneEqpInfoDict['eqpNm'], scriptInfoList))
						else :
							__LOG__.Trace('workId({})/eqpNm({}) scriptInfo does not exist in eqpInfo'.format(oneWorkId, oneEqpInfoDict['eqpNm']))
							pass
				else :
					__LOG__.Trace('no registered workId({}) in WORKINFO_REPO'.format(oneWorkId))
					return
	
				__LOG__.Trace('scriptFileList: {}'.format(scriptFileList))
				eventDate 	= wrapper['workInfo']['workEvntDate']
				yyyyMMdd 	= datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%Y%m%d')
				__LOG__.Trace('eventDate({}), yyyyMMdd({})'.format(eventDate, yyyyMMdd))
				self._getScriptFiles(yyyyMMdd, oneWorkId, scriptFileList)

				logDict = self._writeTangoWorkFile(yyyyMMdd, eventDate, oneWorkId, wrapper)

				self._removeCompleteWorkInfo(oneWorkId)
			else :
				__LOG__.Trace('Unsupported eqpInfo type: {}'.format(type(rawDict.get('eqpInfo'))))
				pass
		else :
			__LOG__.Trace('workId({}), eqpInfo rawData is None'.format(oneWorkId))
			pass

		return logDict, yyyyMMdd, eventDate

	def _getScriptFiles(self, yyyyMMdd, workId, scriptFileList) :
		if not scriptFileList :
			__LOG__.Trace('scriptFileList({}) is empty'.format(scriptFileList))
			return

		try :
			tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
			self._mkdirs(tacsWorkInfoPath)

			sftpClient = SFTPClient.SftpClient(self.host, self.port, self.user, self.passwd)
			for oneScriptFileDict in scriptFileList :
				remoteFilepath 	= oneScriptFileDict['remoteFilepath']
				remoteFilename 	= oneScriptFileDict['remoteFilename'] 

				sftpClient.download(remoteFilepath, remoteFilename, tacsWorkInfoPath)
				__LOG__.Trace('scriptFile from({}) -> to({}) download succeed'.format(os.path.join(remoteFilepath, remoteFilename), os.path.join(tacsWorkInfoPath, remoteFilename)))

			sftpClient.close()
		except Exception as ex :
			__LOG__.Trace('scriptFile download process failed {}'.format(ex))
			self.createDateFile('{}_{}'.format(self.searchStartDate, self.searchEndDate))
			self._removeCompleteWorkInfo(workId)
			raise ex
	
	def _writeTangoWorkFile(self, yyyyMMdd, eventDate, workId, wrapper) :
		logDict = {}
		try :
			tacsWorkInfoPath = os.path.join(self.workInfoBaseDir, yyyyMMdd, workId)
			self._mkdirs(tacsWorkInfoPath)

			contents = json.dumps(wrapper, ensure_ascii=False)
			__LOG__.Trace('contents: {}'.format(contents))
			createFilePath = os.path.join(tacsWorkInfoPath, '{}_{}_META.json'.format(eventDate, workId))
			self._createFile(createFilePath, contents)
			logDict['tacsLnkgRst'] = 'OK'

			__LOG__.Trace(self.migration)

			if self.migration :
				__LOG__.Trace( ['mf','30000', 'put', 'dbl', 'stdoutfile://{}'.format(createFilePath)] )	
				subprocess.call(['mf', '30000', 'put,dbl,stdoutfile://{}'.format(createFilePath)])
			else :
				time.sleep(self.stdoutSleepTime)
				self._stdout('file://{}'.format(createFilePath))
		except Exception as ex :
			__LOG__.Trace('workFile write process failed {}'.format(ex))
			logDict['tacsLnkgRst'] = 'FAIL'
			logDict['tacsLnkgRsn'] = ex.args
			self._removeCompleteWorkInfo(workId)
			raise ex
		finally :
			logDict['evntTypCd'] 	= self.receivedWorkCode
			logDict['evntDate'] 	= eventDate
			logDict['workId']		= workId
			logDict['lnkgEqpIp']	= ''

		return logDict

#			self._writeTacsHistoryFile(yyyyMMdd, eventDate, logDict)

	def _writeTacsHistoryFile(self, yyyyMMdd, eventDate, logDictList) :
		if logDictList :
			__LOG__.Trace('received workInfo history: {}'.format(logDictList))
			try :
				tacsHistoryTempPath = os.path.join(self.auditLogTempDir, 'AUDIT_{}'.format(self.receivedWorkCode))
				self._mkdirs(tacsHistoryTempPath)
				contentList	= list()

				for oneLogDict in logDictList :
					content  = json.dumps(oneLogDict, ensure_ascii=False)
					contentList.append(content)		

				contents = '\n'.join(contentList)
			
				__LOG__.Trace('contents: {}'.format(contents))

				tacsHistoryFilename = self._getTacsHistoryFilename(yyyyMMdd, eventDate)
				__LOG__.Trace('tacsHistoryFilename: {}'.format(tacsHistoryFilename))
				self._createFile(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), contents) 
				
				tacsHistoryPath = os.path.join(self.auditLogBaseDir, 'AUDIT_{}'.format(self.receivedWorkCode))
				self._mkdirs(tacsHistoryPath)

				shutil.move(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename))
				__LOG__.Trace('tacsHistory file move from {} -> to {} succeed'.format(os.path.join(tacsHistoryTempPath, tacsHistoryFilename), os.path.join(tacsHistoryPath, tacsHistoryFilename)))
			except Exception as ex :
				__LOG__.Trace('tacsHistory {} load process failed {}'.format(logDictList, ex))
		else :
			__LOG__.Trace('received workInfo history({}) is invalid'.format(logDictList))

	def _mkdirs(self, directory) :
		__LOG__.Trace('{} isExists: {}'.format(directory, os.path.exists(directory)))
		if not os.path.exists(directory) :
			__LOG__.Trace('create directories {}'.format(directory))
			os.makedirs(directory)

	def _createFile(self, filePath, contents) :
		f = None
		try :
			f = open(filePath, 'w')
			f.write(contents)
			__LOG__.Trace('{} file is created'.format(filePath))
		except Exception as ex :
			__LOG__.Trace('{} to file process failed {}'.format(contents, ex))
			raise ex
		finally :
			if f :
				f.close()

	def _getTacsHistoryFilename(self, yyyyMMdd, eventDate) :
		HHmm 				= datetime.strptime(eventDate, '%Y%m%d%H%M%S').strftime('%H%M')
		tacsHistoryFilename = '{}_{}_{}.audit'.format(yyyyMMdd, HHmm, uuid.uuid4())
		return tacsHistoryFilename

	def _removeCompleteWorkInfo(self, workId) :
		if workId in self.WORKINFO_REPO :
			del self.WORKINFO_REPO[workId]
			__LOG__.Trace('workId({}), WORKINFO_REPO: {}'.format(workId, self.WORKINFO_REPO))

	def shutdown(self) :
		try :
			if self.scheduler :
				#self.scheduler.remove_job('workInfo_scheduler')
				self.scheduler.shutdown()
				__LOG__.Trace('scheduler is terminated')
			else :
				__LOG__.Trace('scheduler is None')
		except Exception as ex :
			__LOG__.Trace('shutdown failed {}'.format(ex))

	def run(self, searchStartDate = None, searchEndDate = None, migration = False) :

		if not migration :
			self.scheduler = BlockingScheduler()
			self.scheduler.add_job(self._executeScheduler, 'cron', minute='*/{}'.format(self.scheduleInterval), second='0', id='workInfo_scheduler', max_instances=2)
			self.scheduler.start()

		else :
			self._executeMigration(searchStartDate, searchEndDate)
			__LOG__.Trace('migration process done')
Example #38
def run():
    channels_update()
    scheduler = BlockingScheduler()
    scheduler.add_job(channels_update, 'interval', hours=1)
    scheduler.start()
Example #39
def main():
    """ main function """
    parser = argparse.ArgumentParser()
    parser.add_argument("-g", "--graph", action="store_true", default=False)
    parser.add_argument("-j", "--json", action="store_true", default=False)
    parser.add_argument("-t", "--test", action="store_true", default=False)
    parser.add_argument("-i", "--interval")
    argcomplete.autocomplete(parser)
    args = parser.parse_args()

    interval = args.interval if args.interval else str(config.main.interval)
    drain = str2bool(config.main.drain)
    test_string = "(test)" if args.test else "(live)"
    setproctitle.setproctitle("greencandle-backend_{0}{1}".format(
        interval, test_string))

    minute = {
        "3m": "0,3,6,9,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57",
        "5m": "0,5,10,15,20,25,30,35,40,45,50,55",
        "15m": "0,15,30,45",
        "30m": "0,30",
        "1h": "0",
        "2h": "0",
        "3h": "0",
        "4h": "0",
    }

    hour = {
        "3m": "*",
        "5m": "*",
        "15m": "*",
        "30m": "*",
        "1h": "*",
        "2h": "0,2,4,6,8,10,12,14,16,18,20,22",
        "3h": "0,3,6,9,12,15,18,21",
        "4h": "0,4,8,12,16,20"
    }

    sched = BlockingScheduler()

    @GET_EXCEPTIONS
    @sched.scheduled_job('interval', seconds=int(config.main.check_interval))
    def get_price():
        LOGGER.info("Starting Price check")
        prod_int_check(interval, args.test)
        LOGGER.info("Finished Price check")

    @GET_EXCEPTIONS
    @sched.scheduled_job('interval', minutes=30)
    def get_graph():
        for pair in config.main.pairs.split():
            LOGGER.info("Creating graph for %s" % pair)
            volume = 'vol' in config.main.indicators
            graph = Graph(test=False,
                          pair=pair,
                          interval=config.main.interval,
                          volume=volume)
            graph.get_data()
            graph.create_graph('/data/graphs/')
            graph.get_screenshot()
            graph.resize_screenshot()

    @GET_EXCEPTIONS
    @sched.scheduled_job('interval', seconds=60)
    def keepalive():
        Path('/var/run/greencandle').touch()

    @GET_EXCEPTIONS
    @sched.scheduled_job('cron',
                         minute=minute[interval],
                         hour=hour[interval],
                         second="30")
    def prod_run():
        LOGGER.info("Starting prod run")
        prod_loop(interval, test_trade=args.test)
        LOGGER.info("Finished prod run")

    LOGGER.info("Starting initial prod run")
    prod_initial(interval)  # initial run, before scheduling begins
    LOGGER.info("Finished initial prod run")
    prod_run()

    try:
        sched.start()
    except KeyboardInterrupt:
        LOGGER.warning("\nExiting on user command...")
        sys.exit(1)
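Example #39 spells out every fire minute and hour per candle interval in lookup tables. For steps that divide 60 or 24 evenly, cron step syntax expresses the same schedules more compactly; a small sketch of the equivalence using CronTrigger:

from apscheduler.triggers.cron import CronTrigger

# '*/15' in the minute field fires at minutes 0, 15, 30 and 45,
# matching the "15m": "0,15,30,45" table entry above.
fifteen_min = CronTrigger(minute='*/15', second='30')

# '*/4' in the hour field fires at hours 0, 4, 8, 12, 16 and 20,
# matching the "4h": "0,4,8,12,16,20" entry.
four_hour = CronTrigger(minute='0', hour='*/4', second='30')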
Example #40
import os
import sys
import time
import calendar
from datetime import date, datetime, timedelta

from apscheduler.schedulers.blocking import BlockingScheduler
from services import delete, push, backuptemperture
if __name__ == '__main__':
    
    service = BlockingScheduler(timezone="Asia/Taipei")

    service.add_job(backuptemperture.run,'cron',hour=0,minute=0)
    service.add_job(delete.run,'cron',hour=16,minute=48)
    service.add_job(push.push,'cron',day_of_week='mon-fri',hour=16,minute=50)

    try:
        service.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #41
from apscheduler.schedulers.blocking import BlockingScheduler
from main import *
import logging
from datetime import datetime

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    downloader = Downloader()
    analyser = Analyser()
    scheduler = BlockingScheduler(logger=logger)
    scheduler.add_job(downloader.run,
                      'interval',
                      seconds=30,
                      next_run_time=datetime.now())
    scheduler.add_job(analyser.run,
                      'interval',
                      seconds=30,
                      next_run_time=datetime.now())

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
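Example #41 passes next_run_time=datetime.now() so each interval job fires once immediately rather than waiting a full 30 seconds for its first run. The same idea in isolation (refresh is a stand-in function):

from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def refresh():
    print('refreshed')

scheduler = BlockingScheduler()
# Without next_run_time, the first run happens one full interval after
# start(); datetime.now() schedules an immediate first run instead.
scheduler.add_job(refresh, 'interval', minutes=10, next_run_time=datetime.now())
scheduler.start()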
Example #42
    return msg


def getdouying():
    try:
        INFO('check douying')
        stampTime = int(time.time())
        msgDict = douying(stampTime, int(interval_delay))
        if msgDict:
            bot.send_group_msg_async(group_id=QQqun,
                                     message=msgDict,
                                     auto_escape=False)
            time.sleep(0.1)
    except Exception:
        WARN('error when douying')
    finally:
        pass


try:
    sched.add_job(getdouying,
                  'interval',
                  seconds=interval_delay,
                  misfire_grace_time=interval_delay,
                  coalesce=True,
                  max_instances=15)
except Exception as e:
    WARN('error when adding the job')
# Start the scheduler
sched.start()
Example #43
def startScheduler():
    scheduler = BlockingScheduler()
    scheduler.add_job(scraperScheduler, 'interval', minutes=30)
    scheduler.start()
Example #44
def dojob():
    # Create the scheduler: BlockingScheduler
    scheduler = BlockingScheduler()
    # Add the task with a 120-second interval
    scheduler.add_job(task, 'interval', seconds=120, id='test_job1')
    scheduler.start()
Example #45
    query_results = list(collection.find({"date": date}))
    to_be_collected = [result["contest_name"] for result in query_results]
    logging.info(to_be_collected)
    return to_be_collected


def edit_collect_file(contest_name: str) -> None:
    with open("collect.json") as f:
        collect = json.load(f)

    collect["sheets"][0]["tabName"] = contest_name
    collect["sheets"][0]["contests"][0]["link"] = contest_name

    with open("collect.json", "w") as f:
        json.dump(collect, f, indent=4)


@SCHED.scheduled_job("cron", id="collect_labs", hour=0, minute=0)
def collect_labs() -> None:
    logging.info("Collecting")
    contests = get_to_be_collected()
    if not contests:
        send_message("There aren't any contests to collect today.")
    for contest in contests:
        edit_collect_file(contest)
        main()
        send_message(f"Contest: {contest} succesfully collected.")


SCHED.start()
Example #46
 def run_cron_jobs(self) -> None:
     """Runs the web scraper every morning."""
     schedule = BlockingScheduler()
     schedule.add_job(main, 'cron', day_of_week='0-6', hour='8')
     schedule.start()
Example #47
 def start_task_():
     scheduler = BlockingScheduler()
     scheduler.add_job(parse_and_send_sms, 'interval', seconds=60)
     scheduler.start()
Example #48
                                                 second=second)
        date = datetime.now() + timedelta(days=1)
        date_string = date.strftime("%Y-%m-%d")
        dt = date_string + " " + time
        return dt

    # One-off task: after it is created, it runs only once
    def job_once(self, scheduler, function_name):
        print("execute job : job_once")
        date_time = self.job_random_time()
        print(date_time)
        scheduler.add_job(function_name, 'date', run_date=date_time)
        print(scheduler.print_jobs())


if __name__ == '__main__':
    obj = DispatchHelper()
    cnbeta = CrawCnbeta()
    solidot = CrawSolidot()
    # Create the BlockingScheduler
    blockingScheduler = BlockingScheduler()
    blockingScheduler.add_job(obj.job_once,
                              'interval',
                              days=1,
                              args=[blockingScheduler, cnbeta.exec_cnbeta])
    blockingScheduler.add_job(obj.job_once,
                              'interval',
                              days=1,
                              args=[blockingScheduler, solidot.exec_solidot])
    blockingScheduler.start()
Example #49
from apscheduler.schedulers.blocking import BlockingScheduler
from build import set_all_scripts_on_fire

sche = BlockingScheduler()


@sche.scheduled_job('interval', minutes=8)
def timed_job():
    set_all_scripts_on_fire()


sche.start()
Example #50
class SpiderStarter:
    def __init__(self):
        self.scheduler = BlockingScheduler()
        self.stock_analyzer = None

    # Initialize the data cache: refreshed daily
    def stock_days_cache_task(self):
        print('stock_days_cache start....')
        self.stock_analyzer = StockAnalyzer()
        print('stock_days_cache end....')

    # Stock list update: refresh the stock list at 9:25 on each trading day
    def stock_list_inc_spider_task(self):
        print('stock_list_inc_spider start....')
        list_spider = StockListIncSpider()
        list_spider.get_stock_list()
        print('stock_list_inc_spider end....')

    # Daily trade list: refresh trading data at 15:10 on each trading day
    def stock_day_inc_spider_task(self):
        print('stock_day_inc_spider start....')
        day_inc_spider = StockDayIncSpider(None, is_persist=True)
        day_inc_spider.get_allstocks_day()
        print('stock_day_inc_spider end....')

    def stock_day_realtime_spider_task(self):
        print('stock_day_realtime_spider start....')
        if self.stock_analyzer is None:
            self.stock_analyzer = StockAnalyzer()
        spider = StockDayIncSpider(self.stock_analyzer)
        spider.get_allstocks_day()
        print('stock_day_realtime_spider end....')

    def start(self):
        try:
            self.scheduler.add_job(self.stock_days_cache_task,
                                   'cron',
                                   minute='0',
                                   day_of_week='0-4',
                                   hour='9',
                                   id='stock_days_cache')
            self.scheduler.add_job(self.stock_list_inc_spider_task,
                                   'cron',
                                   minute='25',
                                   day_of_week='0-4',
                                   hour='9')
            self.scheduler.add_job(self.stock_day_inc_spider_task,
                                   'cron',
                                   id='stock_day_inc_spider',
                                   minute='10',
                                   day_of_week='0-4',
                                   hour='15')
            self.scheduler.add_job(self.stock_day_realtime_spider_task,
                                   'cron',
                                   second='*/1',
                                   day_of_week='0-4',
                                   hour='9-12,13-22',
                                   max_instances=1,
                                   id='stock_day_realtime_spider')
            self.scheduler.start()
        except Exception as e:
            print(e)
Example #51
def scheduleTask():
    # Create the scheduler: BlockingScheduler
    scheduler = BlockingScheduler()
    scheduler.add_job(task, 'interval', seconds=60, id='task1')
    scheduler.start()
Example #52
def scheduler():
    sched = BlockingScheduler()
    sched.configure(timezone='Asia/Seoul')
    sched.add_job(fetch_cryptocompare, 'interval', minutes=1)  # run every minute
    sched.start()
Example #53
    def add(self):
        for d in range(100, 110):
            self.t.__class__.cache.add(d)
            time.sleep(randint(1, 3))
            print(d, len(self.t.__class__.cache))


if __name__ == '__main__':
    from apscheduler.jobstores.memory import MemoryJobStore
    from apscheduler.schedulers.blocking import BlockingScheduler
    from apscheduler.executors.pool import ThreadPoolExecutor

    jobstores = {'default': MemoryJobStore()}

    # use ThreadPoolExecutor as the default executor; ProcessPoolExecutor does not work here
    executors = {
        'default': ThreadPoolExecutor(4),
    }

    job_defaults = {'coalesce': False, 'max_instances': 1}
    app = BlockingScheduler(jobstores=jobstores,
                            executors=executors,
                            job_defaults=job_defaults)

    def task():
        print(len(CacheTest.cache))

    app.add_job(task, 'interval', seconds=2)
    app.start()
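Example #53 wires jobstores, executors and job_defaults into the scheduler's constructor. A minimal sketch of routing individual jobs to named executors in the same configuration style (pool sizes are illustrative):

from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

executors = {
    'default': ThreadPoolExecutor(10),  # I/O-bound jobs
    'cpu': ProcessPoolExecutor(2),      # CPU-bound jobs
}

def io_task():
    print('io task')

def cpu_task():
    print('cpu task')

scheduler = BlockingScheduler(executors=executors)
scheduler.add_job(io_task, 'interval', seconds=5)                   # runs on 'default'
scheduler.add_job(cpu_task, 'interval', seconds=5, executor='cpu')  # runs on the process pool
scheduler.start()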
Example #54
def clock() -> None:
    set_tag("pcapi.app_type", "clock")
    scheduler = BlockingScheduler()
    utils.activate_sentry(scheduler)

    scheduler.add_job(synchronize_allocine_stocks, "cron", day="*", hour="23")

    scheduler.add_job(synchronize_provider_api, "cron", day="*", hour="1")

    scheduler.add_job(pc_remote_import_beneficiaries,
                      "cron",
                      day="*",
                      hour="21",
                      minute="50")

    scheduler.add_job(pc_remote_import_beneficiaries_from_old_dms,
                      "cron",
                      day="*",
                      hour="20",
                      minute="50")

    scheduler.add_job(pc_import_beneficiaries_from_dms_v3, "cron", hour="*")

    scheduler.add_job(pc_import_beneficiaries_from_dms_v4,
                      "cron",
                      hour="*",
                      minute="20")

    scheduler.add_job(update_booking_used, "cron", day="*", hour="0")

    scheduler.add_job(
        pc_handle_expired_bookings,
        "cron",
        day="*",
        hour="5",
    )

    scheduler.add_job(
        pc_notify_soon_to_be_expired_individual_bookings,
        "cron",
        day="*",
        hour="5",
        minute="30",
    )

    scheduler.add_job(pc_notify_newly_eligible_users,
                      "cron",
                      day="*",
                      hour="3")

    scheduler.add_job(pc_clean_expired_tokens, "cron", day="*", hour="2")

    scheduler.add_job(pc_check_stock_quantity_consistency,
                      "cron",
                      day="*",
                      hour="1")

    scheduler.add_job(pc_send_tomorrow_events_notifications,
                      "cron",
                      day="*",
                      hour="16")

    scheduler.add_job(pc_clean_past_draft_offers, "cron", day="*", hour="20")

    scheduler.add_job(pc_send_withdrawal_terms_to_offerers_validated_yesterday,
                      "cron",
                      day="*",
                      hour="6")

    scheduler.add_job(pc_recredit_underage_users, "cron", day="*", hour="0")

    scheduler.start()
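Example #54 registers a large batch of cron jobs at process start. When jobs are kept in a persistent jobstore, giving each one an explicit id together with replace_existing=True keeps restarts from duplicating them; a hedged sketch (synchronize and the id are illustrative):

from apscheduler.schedulers.blocking import BlockingScheduler

def synchronize():
    print('sync')

scheduler = BlockingScheduler()
# With a persistent jobstore, an explicit id plus replace_existing=True
# makes re-registering the same job on every restart idempotent.
scheduler.add_job(synchronize, 'cron', hour='23', id='nightly_sync',
                  replace_existing=True)
scheduler.start()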
Example #55
 def wrapper():
     scheduler = BlockingScheduler()
     scheduler.add_job(func, args[0], hours=kwargs['hour'])
     scheduler.start()  # blocks; the line below only runs after the scheduler shuts down
     return func(args=args, kwargs=kwargs)
Example #56
def aps():
    sched = BlockingScheduler()
    sched.add_job(getaqi, 'interval', days=1)
    sched.start()
Example #57
def run():
    scheduler = BlockingScheduler()
    scheduler.add_job(doValid, 'interval', minutes=30)
    scheduler.start()
Example #58
from apscheduler.schedulers.blocking import BlockingScheduler
import urllib3

# Declare a scheduler
sched = BlockingScheduler()


# Define the schedule: Monday through Friday, run scheduled_job() every 20 minutes
@sched.scheduled_job('cron', day_of_week='mon-fri', minute='*/20')
def scheduled_job():
    url = "https://fitnessbot-prd.herokuapp.com/"
    connect = urllib3.PoolManager().request('GET', url)


sched.start()  # start the schedule
Example #59
def job():
    sched = BlockingScheduler()
    sched.add_job(get, 'interval', hours=1, misfire_grace_time=300)
    sched.start()
Example #60
        compulsory_service_id = compulsory_service.get('serviceId')

        if compulsory_service_id not in user_service_set:
            service_name = compulsory_service.get('service')
            service_json = {'tenant': tenant_id, 'service': service_name}
            pykube_util.deploy(service_json)
            print('deployed compulsory service %s successfully' % service_name)

            json_to_add = {
                'serviceId': compulsory_service_id,
                'service': service_name,
                'service_started': True,
                "active": True
            }
            add_doc(tenant_id, json_to_add)


# read_services()

if __name__ == '__main__':
    print('Starting kube scheduler')
    SCHEDULER_INTERVAL = constatnts.SCHEDULER_INTERVAL
    executors = {'default': ThreadPoolExecutor()}
    app_scheduler = BlockingScheduler(executors=executors, timezone=utc)

    app_scheduler.add_job(read_services,
                          'interval',
                          seconds=SCHEDULER_INTERVAL,
                          id='kubernetes deployment scheduler')
    app_scheduler.start()