Example #1
def test6():
    """Run a scheduled task; pause, resume, and work with the Job instance."""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    job = scheduler.add_job(
        my_job, 'interval', args=('123', ), seconds=1,
        id='my_job_id')  # run my_job every second; args are passed to my_job; id is optional
    print("job id:{}, job name:{}, job args:{}, job func:{}, trigger:{}".format(
        job.id, job.name, job.args, job.func, job.trigger))
    scheduler.start()  # start() returns immediately; the job keeps running in the background while the code below continues
    print('reached checkpoint 1')
    while scheduler.state:
        if time.time() - start_time > 5:
            print('pausing job')
            # scheduler.pause()  # pause every job on the scheduler
            job.pause()  # pause this single job
            break
    print('resuming job')
    if time.time() - start_time > 5:
        # scheduler.resume()  # resume every job on the scheduler
        job.resume()  # resume this single job
    time.sleep(4)
    print('current job list: {}'.format(
        scheduler.get_jobs()))  # get_jobs() returns all scheduled Job instances
    scheduler.get_job('my_job_id')  # fetch the Job instance with id 'my_job_id'

    scheduler.print_jobs()  # print a formatted list of all jobs

    print('removing job')
    # scheduler.remove_job('my_job_id')  # remove the job with id 'my_job_id'
    # scheduler.remove_all_jobs()  # remove every job
    job.remove()  # remove this single job
def test3():
    """Run a scheduled task, then pause and resume the whole scheduler."""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        my_job, 'interval', args=('123', ), seconds=1,
        id='my_job_id')  # run my_job every second; args are passed to my_job; id is optional
    scheduler.start()  # start() returns immediately; the job keeps running in the background while the code below continues
    print('reached checkpoint 1')
    while scheduler.state:
        if time.time() - start_time > 5:
            print('pausing jobs')
            scheduler.pause()  # pause all job processing
            break
    print('resuming jobs')
    if time.time() - start_time > 5:
        scheduler.resume()  # resume all job processing
    time.sleep(4)
    print('current job list: {}'.format(
        scheduler.get_jobs()))  # get_jobs() returns all scheduled Job instances
    scheduler.get_job('my_job_id')  # fetch the Job instance with id 'my_job_id'

    scheduler.print_jobs()  # print a formatted list of all jobs

    print('removing jobs')
    # scheduler.remove_job('my_job_id')  # remove the job with id 'my_job_id'
    scheduler.remove_all_jobs()  # remove every job
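Both functions above assume a my_job callable plus the time and BackgroundScheduler imports. A minimal harness to run them could look like this (the my_job body is an illustrative assumption, not part of the original):

import time
from apscheduler.schedulers.background import BackgroundScheduler

def my_job(text):
    # Print a timestamp alongside the argument so each 1-second run is visible.
    print(time.strftime('%H:%M:%S'), text)

if __name__ == '__main__':
    test6()
    test3()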
Example #4
class AutoReserve():
    def __init__(self):
        self.scheduler  = BackgroundScheduler(timezone="Asia/Seoul")
        self.jobid      = 1
        self.jobdb      = {}

    def addGolfJob(self, ccName, BackFunc, user_id, user_pw, schedule, target_date, target_time, day=21):
        print('autoReserve in Jayuro', schedule.hour(), schedule.minute(), schedule.second(), target_date, target_time, day)
        print(self.jobid)
        # Job ids are registered as strings; advance past any id already in use.
        while self.scheduler.get_job(str(self.jobid)):
            print(self.jobid, self.scheduler.get_job(str(self.jobid)))
            self.jobid += 1

        self.jobid += 1
        
        result = self.scheduler.add_job(BackFunc,'cron', args=[user_id, user_pw, target_date, target_time, day], week='1-53', day_of_week='0-6', \
                                            hour=schedule.hour(), minute=schedule.minute(), second=schedule.second(), id=str(self.jobid))
        print("result:", result)
        self.jobdb[self.jobid]= ccName

        if self.scheduler.state == 1: #apscheduler.schedulers.base.STATE_RUNNING
            print('Scheduler is running')
        elif self.scheduler.state == 2:
            print('Scheduler is paused')
        elif self.scheduler.state == 0:
            print('Scheduler is stopped')
            self.scheduler.start()

        return self.jobid

    def autoStop(self):   
        print('autoStop')
        for jj in self.scheduler.get_jobs():
            print(jj)

        self.scheduler.remove_all_jobs()
        self.jobdb.clear()
        self.jobid = 1
        print(self.scheduler.get_jobs())

    def autoInfo(self):
        print('Auto information')
        print(self.scheduler.get_jobs())
        for jj in self.scheduler.get_jobs():
            print(jj)

        for key in self.jobdb:
            print(key, self.jobdb[key])

    def printlog(self, target_date, target_time):
        now = datetime.datetime.now()
        print(str(now) + str(target_date + "," + target_time))
        # print("Running main process............... : " + str(datetime.datetime.now(timezone('Asia/Seoul'))))

    def getJobInfo(self):
        return self.jobdb
Example #5
class AdvancedScheduleManager:
    def __init__(self):

        self.s = BackgroundScheduler()
        self.s.start()

    def reset_venue(self, bot, venue):

        print("reset venue")

        if self.s.get_job(venue) is not None:
            print("remove venue")
            self.s.remove_job(venue)

        self.s.add_job(bot.send_non_update_venues,
                       'date',
                       run_date=datetime.now() + timedelta(seconds=3600),
                       kwargs={"venue": venue},
                       id=venue)

    def test_scheduler(self, bot):

        self.s.add_job(bot.pprint,
                       'date',
                       run_date=datetime.now() + timedelta(seconds=3),
                       kwargs={"text": "Advanced schedule執行"},
                       id="test")

    def get_queue(self):

        return len(self.s.get_jobs())
Example #6
class Scheduler:
    scheduler = None

    def __init__(self) -> None:
        super().__init__()
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.print_schedules()

    def __del__(self):
        self.shutdown()

    def shutdown(self):
        self.scheduler.shutdown()

    def schedule_event(self, aid, dt, event, args):
        scheduled = self.scheduler.add_job(func=event,
                                           trigger='date',
                                           id=aid,
                                           run_date=dt,
                                           args=args)
        return scheduled.id

    def update_event(self, aid, dt, event, args):
        scheduled = self.scheduler.get_job(job_id=aid)
        if scheduled:
            return scheduled.id
        else:
            return self.schedule_event(aid=aid, dt=dt, event=event, args=args)

    def print_schedules(self):
        return self.scheduler.print_jobs()
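A short usage sketch for this wrapper (the job id, delay, and callback are illustrative assumptions):

from datetime import datetime, timedelta

s = Scheduler()
# Schedule a one-shot 'date' job five seconds from now; args go to the callable.
s.schedule_event(aid='reminder-1',
                 dt=datetime.now() + timedelta(seconds=5),
                 event=print,
                 args=['hello from the scheduler'])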
Example #7
def encode_compute():
    scheduler = BackgroundScheduler()
    scheduler.start()
    message = request.form['message']
    image = request.files['img']
    if not message or not image:
        return redirect(url_for('steganography_web.encode'))

    filename = 'website/static/images/' + str(
        session['uid']) + image.filename.split('.')[0] + '.png'
    image.save(filename)
    session['steganography_image'] = filename

    binary_string = encode_string(message) + encode_string(END_OF_ENCODE)
    image = encode_image(filename, binary_string)

    time = datetime.datetime.now() + datetime.timedelta(minutes=4)
    if scheduler.get_job(filename):
        scheduler.reschedule_job(job_id=filename,
                                 trigger='date',
                                 run_date=time)
        print(f'job rescheduled for {time}')
    else:
        scheduler.add_job(delete_file,
                          args=[filename],
                          trigger='date',
                          run_date=time,
                          id=filename)
        print(f'job scheduled for {time}')

    send_days_left_text()
    return redirect(url_for('steganography_web.encode', show='True'))
Example #8
def initialize_scheduler():
    """Initialize the task scheduler. This method configures the global
    scheduler, checks the loaded tasks, and ensures they are all scheduled.
    """
    global SCHEDULER

    # If the scheduler is not enabled, clear it and exit. This prevents any
    # unexpected database session issues.
    if not CONF.scheduler.enable:
        if SCHEDULER:
            SCHEDULER.remove_all_jobs()
            SCHEDULER = None
        LOG.info("Scheduler is not enabled.")
        return

    # Use SQLAlchemy as a Job store.
    jobstores = {
        'default': SQLAlchemyJobStore(engine=get_engine())
    }

    # Two executors: The default is for all plugins. The second one is for
    # the scheduler manager, which makes sure this scheduler instance is
    # aware of all of our plugins.
    executors = {
        'default': ThreadPoolExecutor(10),
        'manager': ThreadPoolExecutor(1),
    }

    # Allow executions to coalesce. See https://apscheduler.readthedocs.org/en
    # /latest/userguide.html#missed-job-executions-and-coalescing
    job_defaults = {
        'coalesce': True,
        'max_instances': 1,
        'replace_existing': True
    }

    # This will automatically create the table.
    SCHEDULER = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    job_defaults=job_defaults,
                                    timezone=utc)

    SCHEDULER.start()
    atexit.register(shutdown_scheduler)

    # Make sure we load in the update_scheduler job. If it exists,
    # we remove/update it to make sure any code changes get propagated.
    if SCHEDULER.get_job(SCHEDULE_MANAGER_ID):
        SCHEDULER.remove_job(SCHEDULE_MANAGER_ID)
    SCHEDULER.add_job(
        update_scheduler,
        id=SCHEDULE_MANAGER_ID,
        trigger=IntervalTrigger(minutes=1),
        executor='manager',
        replace_existing=True
    )
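The atexit hook above references a shutdown_scheduler function that is not shown; a plausible companion, assuming nothing beyond this snippet, would be:

def shutdown_scheduler():
    # Hypothetical reconstruction: stop the scheduler cleanly at process exit.
    global SCHEDULER
    if SCHEDULER:
        SCHEDULER.shutdown(wait=False)
        SCHEDULER = None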
Example #9
class Scheduler:
    def __init__(self):
        self.sched = BackgroundScheduler()
        self.sched.start()
        self.job_id = ''

    def __del__(self):
        self.shutdown()

    def shutdown(self):
        self.sched.shutdown()

    def add_schedule(self, job, trigger, job_id, args=None, seconds=None, minutes=None, hours=None, day=None,
                     day_of_week=None):
        if trigger == 'interval':
            self.sched.add_job(job, trigger=trigger, id=job_id, args=args, seconds=int(seconds))
        elif trigger == 'cron':
            self.sched.add_job(job, trigger, id=job_id, args=args,
                               second=seconds, minute=minutes, hour=hours, day=day, day_of_week=day_of_week)
Example #11
class Aggregator():
    """Store data over time from Storj."""
    def __init__(self):
        """Constructor."""
        self.api = StorjApi()
        self.scheduler = BackgroundScheduler()

        self.scheduler.start()

    def _store_data(self, node_id):
        """Store the data for `node_id`."""
        try:
            info = self.api.get_contact_info(node_id)
            print(info)
        except ApiException as exception:
            print('Error retrieving information: {}'.format(exception.message))

    def start(self, node_id):
        """Start storing data."""
        if not self.scheduler.get_job(job_id=node_id):
            self.scheduler.add_job(func=self._store_data,
                                   trigger='interval',
                                   id=node_id,
                                   seconds=1,
                                   args=[node_id])
            return True

        return False

    def stop(self, node_id):
        """Stop storing data."""
        job = self.scheduler.get_job(job_id=node_id)
        if job:
            job.remove()
            return True

        return False
Example #12
class TaskScheduler:
    def __init__(self):
        jobs_database_name = 'jobs.sqlite'
        jobstores = {
            # 'default': SQLAlchemyJobStore(url=F'sqlite:///{jobs_database_name}')
        }
        executors = {
            'default': {
                'type': 'threadpool',
                'max_workers': 20
            },
            'processpool': ProcessPoolExecutor(max_workers=5)
        }
        job_defaults = {'coalesce': False, 'max_instances': 3}
        self.scheduler = BackgroundScheduler()
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=utc)

    def get_task(self, task_id):
        return self.scheduler.get_job(task_id)

    def get_tasks_ids(self):
        task_ids = []
        jobs = self.scheduler.get_jobs()
        for j in jobs:
            task_ids.append(j.id)

        return task_ids

    def start_scheduler(self):
        self.scheduler.start()

    def stop_scheduler(self):
        self.scheduler.shutdown()

    def add_task(self, task_func, interval_minutes, args, task_id):
        print('Adding an interval task')
        self.scheduler.add_job(task_func,
                               IntervalTrigger(minutes=interval_minutes),
                               args=args,
                               id=str(task_id))
        print('Adding the interval task finished')

    def remove_task(self, id):
        print(F'Removing task (Id: {id})')
        self.scheduler.remove_job(id)
        print('The task removed.')
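A usage sketch for TaskScheduler (the task function, id, and interval are illustrative assumptions):

def ping():
    # Trivial task body for demonstration.
    print('ping')

ts = TaskScheduler()
ts.start_scheduler()
ts.add_task(ping, interval_minutes=1, args=None, task_id='ping')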
Example #14
class ScheduleController:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        # self.scheduler.add_job()

    def run_schedules(self):
        try:
            self.scheduler.start()
            # Shut down the scheduler when exiting the app
            atexit.register(lambda: self.scheduler.shutdown())
        except Exception as e:
            print(
                "*** ScheduleController.run_schedules raised an exception: {}"
                .format(e))

    def get_pending_jobs(self, job_id=None):
        """
        Get list of jobs or get job by job_id
        :param job_id: string | optional if it's not none returns job which equals job_id
        :returns: List or Dict
        """
        try:
            if job_id is not None:
                return self.scheduler.get_job(job_id)
            else:
                return self.scheduler.get_jobs()
        except Exception as e:
            pass

    def register_schedule(self, callback, trigger_timer, job_id, name=None):
        """
        Void method
        Schedule registerer hook
        :param callback: function
        :param trigger_timer: the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``)
        :param job_id: string | Schedule job id
        :param name: string | optional name of job
        """
        if callable(callback) is False:
            return False
        else:
            self.scheduler.add_job(
                func=callback,
                trigger=trigger_timer,
                id=job_id,
                name=name if name is not None else str(job_id) +
                " Schedule registered without name",
                replace_existing=True)
Example #15
class Scheduler:
    timezone = settings.TIME_ZONE

    def __init__(self):
        self.scheduler = BackgroundScheduler(timezone=self.timezone, executors={'default': ThreadPoolExecutor(30)})

    def _dispatch(self, task_id, tp, targets, extra, threshold, quiet):
        Detection.objects.filter(pk=task_id).update(latest_run_time=human_datetime())
        rds_cli = get_redis_connection()
        for t in json.loads(targets):
            rds_cli.rpush(MONITOR_WORKER_KEY, json.dumps([task_id, tp, t, extra, threshold, quiet]))
        connections.close_all()

    def _init(self):
        self.scheduler.start()
        for item in Detection.objects.filter(is_active=True):
            now = datetime.now()
            trigger = IntervalTrigger(minutes=int(item.rate), timezone=self.timezone)
            self.scheduler.add_job(
                self._dispatch,
                trigger,
                id=str(item.id),
                args=(item.id, item.type, item.targets, item.extra, item.threshold, item.quiet),
                next_run_time=now + timedelta(seconds=randint(0, 60))
            )
        connections.close_all()

    def run(self):
        rds_cli = get_redis_connection()
        self._init()
        rds_cli.delete(settings.MONITOR_KEY)
        logging.warning('Running monitor')
        while True:
            _, data = rds_cli.brpop(settings.MONITOR_KEY)
            task = AttrDict(json.loads(data))
            if task.action in ('add', 'modify'):
                trigger = IntervalTrigger(minutes=int(task.rate), timezone=self.timezone)
                self.scheduler.add_job(
                    self._dispatch,
                    trigger,
                    id=str(task.id),
                    args=(task.id, task.type, task.targets, task.extra, task.threshold, task.quiet),
                    replace_existing=True
                )
            elif task.action == 'remove':
                job = self.scheduler.get_job(str(task.id))
                if job:
                    job.remove()
Example #16
def _handle_next_queue_item(scheduler: BackgroundScheduler,
                            job_information: Dict[Text, Any]) -> None:
    job_id = job_information.pop(JOB_ID_KEY)
    existing_job: Optional[Job] = scheduler.get_job(job_id)

    should_cancel_job = job_information.pop(CANCEL_JOB_KEY, False)
    if should_cancel_job and existing_job:
        scheduler.remove_job(job_id)
        return

    if existing_job:
        _modify_job(existing_job, job_information)
    elif job_id == background_dump_service.BACKGROUND_DUMPING_JOB_ID:
        _add_job_to_dump_files(scheduler, job_information)
    else:
        logger.warning(f"Did not find a scheduled job with id '{job_id}'.")
Example #17
def init():
    printer.set_handler(print_handler)
    progressbar.set_handler(progressbar_handler)
    scheduler = BackgroundScheduler(daemon=True)

    load_min_err_rev_dic()

    job = scheduler.get_job("auto_check")
    if job is not None:
        scheduler.remove_job("auto_check")
    # Run the check at 06:00, 12:00, 15:00, 18:00 and 21:00 every day.
    job = scheduler.add_job(auto_check,
                            'cron',
                            hour='6,12,15,18,21',
                            id="auto_check")
    scheduler.start()
Example #18
class Scheduler(object):
    def __init__(self):
        self._scheduler = BackgroundScheduler(executors=executors,
                                              job_defaults=job_defaults)
        self._scheduler.add_jobstore('redis',
                                     jobs_key='crontpy.jobs',
                                     run_times_key='crontpy.run_times')

    @property
    def running(self):
        return self._scheduler.running

    def start(self):
        self._scheduler.start()

    def shutdown(self, wait=True):
        self._scheduler.shutdown(wait)

    def pause(self):
        self._scheduler.pause()

    def resume(self):
        self._scheduler.resume()

    def get_jobs(self):
        return self._scheduler.get_jobs()

    def get_job(self, jid):
        return self._scheduler.get_job(job_id=jid)

    def run_job(self, jid):
        job = self.get_job(jid)
        if not job:
            raise Exception('job id:{0} not found'.format(jid))
        job.func(*job.args, **job.kwargs)

    def resume_job(self, jid):
        self._scheduler.resume_job(job_id=jid)

    def pause_job(self, jid):
        self._scheduler.pause_job(job_id=jid)

    def modify_job(self, jid, **changes):
        return self._scheduler.modify_job(job_id=jid, **changes)

    def delete_job(self, jid):
        self._scheduler.remove_job(job_id=jid)
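The executors and job_defaults globals consumed by the constructor are not shown; plausible values, assumed here to mirror the ThreadPoolExecutor setup in Example #8, would be:

from apscheduler.executors.pool import ThreadPoolExecutor

# Hypothetical module-level configuration; the original may differ.
executors = {'default': ThreadPoolExecutor(10)}
job_defaults = {'coalesce': True, 'max_instances': 1}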
Example #19
class Scheduler:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

        self.jobs = {}  # the scheduler cannot track custom job metadata, so keep our own map

    # trigger_time_str format (in UTC): HH:MM:SS
    def schedule_job(self,
                     trigger_time_str,
                     job_func,
                     job_name,
                     job_args=[],
                     job_kwargs={}):
        if not re.match(TIME_REGEX, trigger_time_str):
            raise ValueError(
                f'invalid time format, expecting HH:MM:SS (in UTC), got: {trigger_time_str}'
            )

        # IntervalTrigger will schedule the event every 24 hrs beginning at the start date, BUT NOT in the past
        trigger = IntervalTrigger(hours=24,
                                  start_date=f'2021-01-01 {trigger_time_str}',
                                  timezone=pytz.utc)
        job = self.scheduler.add_job(job_func,
                                     args=job_args,
                                     kwargs=job_kwargs,
                                     trigger=trigger,
                                     name=job_name)
        self.jobs[job.id] = {
            'job_name': job_name,
            'job_args': job_args,
            'job_kwargs': job_kwargs
        }

    def unschedule_job(self, job_id):
        if job_id not in self.jobs:
            raise ValueError(
                f'Scheduler.unschedule_job: job_id not found: {job_id}')

        self.scheduler.remove_job(job_id)
        del self.jobs[job_id]

    def get_scheduled_jobs(self):
        for job_id, job_info in self.jobs.items():
            job_info['next_run_time'] = str(
                self.scheduler.get_job(job_id).next_run_time)
        return self.jobs
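The TIME_REGEX constant checked in schedule_job is not shown; a plausible definition (an assumption, not the original) is:

import re

# Hypothetical reconstruction: matches 24-hour HH:MM:SS, e.g. '23:59:59'.
TIME_REGEX = re.compile(r'^([01]\d|2[0-3]):[0-5]\d:[0-5]\d$')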
Example #20
    def job_event_handler(sched: BackgroundScheduler,
                          err_count: list[int | float],
                          event: JobExecutionEvent):
        if (time.time() - err_count[1]) > 60:
            err_count[0] = 0

        err_count[0] += 1
        err_count[1] = time.time()

        job: apscheduler.job.Job = sched.get_job(event.job_id)
        now = datetime.datetime.now()
        now = now.strftime("%Y-%m-%d %H:%M:%S")

        try:
            if event.exception:
                pushstr = "{}\n{}\nException #{} occurred\nFailing job: {}\nTraceback (most recent call last):\n{}\n{}\n".format(
                    str(now),
                    str(get_self_dir()[2]),
                    str(err_count[0]),
                    str(job.name),
                    str(event.traceback),
                    str(event.exception),
                )
            else:
                pushstr = "{}\n{}\nException #{} occurred\nFailing job: {}\nThe job was skipped\nScheduled run time:\n{}\n".format(
                    str(now),
                    str(get_self_dir()[2]),
                    str(err_count[0]),
                    str(job.name),
                    str(event.scheduled_run_time),
                )
        except Exception:
            pushstr = "{}\n{}\nException #{} occurred\nFailing job: unknown\nScheduled run time:\n{}\n".format(
                str(now),
                str(get_self_dir()[2]),
                str(err_count[0]),
                str(event.scheduled_run_time),
            )

        if err_count[0] >= 3:
            sched.pause()
            pushstr += "3 exceptions within a short period; the scheduler has been paused"

        print(pushstr)
        if "pushkey" in push_option:
            print(dayepao_push(pushstr, **push_option))
Example #21
class SwTimer(object):

    def __init__(self):
        self.scheduler = BackgroundScheduler()

    def getJob(self, jobId):
        return self.scheduler.get_job(jobId)

    def stop_job(self, jobId):
        self.scheduler.remove_job(jobId)

    def add_interval(self, jobId, jobFunc, sec=0, *args):
        # self.scheduler.add_job(jobFunc, 'interval', seconds=sec, id=jobId, args=args)
        self.scheduler.add_job(jobFunc, 'cron', second='*/%d' % sec, id=jobId, args=args)

    def run(self):
        self.scheduler.start()
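A usage sketch for SwTimer (the tick function and 2-second period are illustrative assumptions). Note that '*/%d' % sec yields an invalid cron field when sec is 0, so pass a positive interval:

def tick():
    print('tick')

timer = SwTimer()
timer.add_interval('tick-job', tick, 2)  # cron second='*/2' fires every 2 seconds
timer.run()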
Example #22
def mandelbrot():
    scheduler = BackgroundScheduler()
    scheduler.start()
    try:
        session['uid']
    except KeyError:
        session['uid'] = uuid.uuid4()

    if 'r' in request.values and 'i' in request.values:
        real = float(request.values['r'])
        imaginary = float(request.values['i'])

        image = create(real, imaginary)
        filename = 'website/static/images/' + str(
            session['uid']) + 'fractal.png'
        # image.save('static/images/fractal.png')
        image.save(filename)

        time = datetime.datetime.now() + datetime.timedelta(seconds=10)
        if scheduler.get_job(filename):
            scheduler.reschedule_job(job_id=filename,
                                     trigger='date',
                                     run_date=time)
            print(f'job rescheduled for {time}')
        else:
            scheduler.add_job(delete_file,
                              args=[filename],
                              trigger='date',
                              run_date=time,
                              id=filename)
            print(f'job scheduled for {time}')

        return render_template("mandelbrot.html",
                               image='/' + filename[8:] + '?' +
                               str(rand.randint(1000)),
                               real=real,
                               imag=imaginary)

    else:
        return render_template("mandelbrot.html",
                               image='/static/images/defaultFractal.png',
                               real=0,
                               imag=0)
Example #23
def setRoomEndDate(sender, instance, **kwargs):
    '''
    Set the timeout of the room for the extraction!
    '''
    # ToDo: reload all event on startup!
    # Add the job to the scheduler (only if not present)
    scheduler = BackgroundScheduler()
    if instance.job_id in [i.id for i in scheduler.get_jobs()] and instance.end_date > timezone.now():
        # The event is already present
        job = scheduler.get_job(instance.job_id)
        if job.trigger.run_date != make_aware(instance.end_date):
            # The event is different
            job.trigger.run_date = instance.end_date
            print(f"Event (is changed) for room {instance} will start at {instance.end_date}")
    elif instance.end_date > timezone.now():
        # Creation of a new event
        print(f"Event for room {instance} will start at {instance.end_date}")
        job = scheduler.add_job(xmasg.xmasg_extraction, 'date', run_date=instance.end_date, args=[instance.id])     # https://apscheduler.readthedocs.io/en/stable/modules/triggers/date.html#module-apscheduler.triggers.date
        scheduler.start()

        # Update on DB
        Room.objects.filter(pk=instance.id).update(job_id=job.id) # update should not launch this function again
Example #24
class Runner:
    state = RunnerStateEnum.STOPPED

    def __init__(self):
        self.state = RunnerStateEnum.RUNNING
        self.scheduler = BackgroundScheduler(logger=logger)
        self.jobs = {}

    def add_job(self, id: str, func: Callable, **kwargs):
        logger.debug(f"Adding job to queue: {id}")
        self.jobs[id] = self.scheduler.add_job(func, **kwargs)

    def remove_job(self, id: str):
        logger.debug(f"Removing job from queue: {id}")
        if not self.scheduler.get_job(id):
            logger.warning(f"Unable to remove job {id}")
            return
        self.scheduler.remove_job(id)

    def start(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
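A usage sketch for Runner, assuming the logger and RunnerStateEnum globals it references are in scope (the job below is an illustrative assumption):

runner = Runner()
runner.start()
# Standard APScheduler add_job keyword arguments pass straight through.
runner.add_job('heartbeat', lambda: print('alive'), trigger='interval', seconds=30)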
Example #25
class Scheduler:
    timezone = settings.TIME_ZONE

    def __init__(self):
        self.scheduler = BackgroundScheduler(timezone=self.timezone)
        self.scheduler.add_listener(self._handle_event)

    @classmethod
    def parse_trigger(cls, trigger, trigger_args):
        if trigger == 'interval':
            return IntervalTrigger(seconds=int(trigger_args),
                                   timezone=cls.timezone)
        elif trigger == 'date':
            return DateTrigger(run_date=trigger_args, timezone=cls.timezone)
        else:
            raise TypeError(f'unknown schedule policy: {trigger!r}')

    def _handle_event(self, event):
        obj = SimpleLazyObject(
            lambda: Task.objects.filter(pk=event.job_id).first())
        if event.code == events.EVENT_SCHEDULER_SHUTDOWN:
            logger.info(f'EVENT_SCHEDULER_SHUTDOWN: {event}')
            Notify.make_notify('schedule', '1', 'Scheduler shut down',
                               'The scheduler shut down unexpectedly; you can open an issue on GitHub')
        elif event.code == events.EVENT_JOB_MAX_INSTANCES:
            logger.info(f'EVENT_JOB_MAX_INSTANCES: {event}')
            Notify.make_notify('schedule', '1', f'{obj.name} - max scheduling instances reached',
                               'Usually the previous run has not finished yet; increase the interval or shorten the task runtime')
        elif event.code == events.EVENT_JOB_ERROR:
            logger.info(
                f'EVENT_JOB_ERROR: job_id {event.job_id} exception: {event.exception}'
            )
            Notify.make_notify('schedule', '1', f'{obj.name} - execution error',
                               f'{event.exception}')
        elif event.code == events.EVENT_JOB_EXECUTED:
            if event.retval:
                score = 0
                for item in event.retval:
                    score += 1 if item[1] else 0
                Task.objects.filter(pk=event.job_id).update(
                    latest_status=2
                    if score == len(event.retval) else 1 if score else 0,
                    latest_run_time=human_datetime(event.scheduled_run_time),
                    latest_output=json.dumps(event.retval))
                if score != 0 and time.time() - counter.get(event.job_id,
                                                            0) > 3600:
                    counter[event.job_id] = time.time()
                    Notify.make_notify('schedule', '1', f'{obj.name} - execution failed',
                                       'See the task schedule page for failure details')

    def _init_builtin_jobs(self):
        self.scheduler.add_job(auto_clean_records, 'cron', hour=0, minute=0)

    def _init(self):
        self.scheduler.start()
        self._init_builtin_jobs()
        for task in Task.objects.filter(is_active=True):
            trigger = self.parse_trigger(task.trigger, task.trigger_args)
            self.scheduler.add_job(
                dispatch,
                trigger,
                id=str(task.id),
                args=(task.command, json.loads(task.targets)),
            )

    def run(self):
        rds_cli = get_redis_connection()
        self._init()
        rds_cli.delete(settings.SCHEDULE_KEY)
        logger.info('Running scheduler')
        while True:
            _, data = rds_cli.blpop(settings.SCHEDULE_KEY)
            task = AttrDict(json.loads(data))
            if task.action in ('add', 'modify'):
                trigger = self.parse_trigger(task.trigger, task.trigger_args)
                self.scheduler.add_job(dispatch,
                                       trigger,
                                       id=str(task.id),
                                       args=(task.command, task.targets),
                                       replace_existing=True)
            elif task.action == 'remove':
                job = self.scheduler.get_job(str(task.id))
                if job:
                    job.remove()
Example #26
class FreezerScheduler(object):
    def __init__(self, apiclient, interval, job_path):
        # config_manager
        self.client = apiclient
        self.freezerc_executable = spawn.find_executable('freezer-agent')
        if self.freezerc_executable is None:
            # Needed in the case of a non-activated virtualenv
            self.freezerc_executable = spawn.find_executable('freezer-agent',
                                                             path=':'.join(
                                                                 sys.path))
        LOG.debug('Freezer-agent found at {0}'.format(
            self.freezerc_executable))
        self.job_path = job_path
        self._client = None
        self.lock = threading.Lock()
        self.execution_lock = threading.Lock()
        job_defaults = {'coalesce': True, 'max_instances': 2}
        executors = {
            'default': {
                'type': 'threadpool',
                'max_workers': 1
            },
            'threadpool': {
                'type': 'threadpool',
                'max_workers': 10
            }
        }
        self.scheduler = BackgroundScheduler(job_defaults=job_defaults,
                                             executors=executors)
        if self.client:
            self.scheduler.add_job(self.poll,
                                   'interval',
                                   seconds=interval,
                                   id='api_poll',
                                   executor='default')

        self.add_job = self.scheduler.add_job
        self.remove_job = self.scheduler.remove_job
        self.jobs = {}

    def get_jobs(self):
        if self.client:
            job_doc_list = utils.get_active_jobs_from_api(self.client)
            try:
                utils.save_jobs_to_disk(job_doc_list, self.job_path)
            except Exception as e:
                LOG.error('Unable to save jobs to {0}. '
                          '{1}'.format(self.job_path, e))
            return job_doc_list
        else:
            return utils.get_jobs_from_disk(self.job_path)

    def start_session(self, session_id, job_id, session_tag):
        if self.client:
            return self.client.sessions.start_session(session_id, job_id,
                                                      session_tag)
        else:
            raise Exception("Unable to start session: api not in use.")

    def end_session(self, session_id, job_id, session_tag, result):
        if self.client:
            return self.client.sessions.end_session(session_id, job_id,
                                                    session_tag, result)
        else:
            raise Exception("Unable to end session: api not in use.")

    def upload_metadata(self, metadata_doc):
        if self.client:
            self.client.backups.create(metadata_doc)

    def start(self):
        utils.do_register(self.client)
        self.poll()
        self.scheduler.start()
        try:
            while True:
                # The background scheduler runs jobs in its own threads, so the
                # main thread must be kept alive.
                time.sleep(1)
        except (KeyboardInterrupt, SystemExit):
            # Not strictly necessary if daemonic mode is enabled but
            # should be done if possible
            self.scheduler.shutdown(wait=False)

    def update_job(self, job_id, job_doc):
        if self.client:
            try:
                return self.client.jobs.update(job_id, job_doc)
            except Exception as e:
                LOG.error("Job update error: {0}".format(e))

    def update_job_schedule(self, job_id, job_schedule):
        """
        Pushes to the API the updates the job_schedule information
        of the job_doc

        :param job_id: id of the job to modify
        :param job_schedule: dict containing the job_scheduler information
        :return: None
        """
        doc = {'job_schedule': job_schedule}
        self.update_job(job_id, doc)

    def update_job_status(self, job_id, status):
        doc = {'job_schedule': {'status': status}}
        self.update_job(job_id, doc)

    def is_scheduled(self, job_id):
        return self.scheduler.get_job(job_id) is not None

    def create_job(self, job_doc):
        job = scheduler_job.Job.create(self, self.freezerc_executable, job_doc)
        if job:
            self.jobs[job.id] = job
            LOG.info("Created job {0}".format(job.id))
        return job

    def poll(self):
        try:
            work_job_doc_list = self.get_jobs()
        except Exception as e:
            LOG.error("Unable to get jobs: {0}".format(e))
            return

        work_job_id_list = []

        # create job if necessary, then let it process its events
        for job_doc in work_job_doc_list:
            job_id = job_doc['job_id']
            work_job_id_list.append(job_id)
            job = self.jobs.get(job_id, None) or self.create_job(job_doc)
            if job:
                # check for abort status
                if job_doc['job_schedule']['event'] == 'abort':
                    pid = int(job_doc['job_schedule']['current_pid'])
                    utils.terminate_subprocess(pid, 'freezer-agent')

                job.process_event(job_doc)

        # request removal of any job that has been removed in the api
        for job_id, job in six.iteritems(self.jobs):
            if job_id not in work_job_id_list:
                job.remove()

        remove_list = [
            job_id for job_id, job in self.jobs.items()
            if job.can_be_removed()
        ]

        for k in remove_list:
            self.jobs.pop(k)

    def stop(self):
        sys.exit()

    def reload(self):
        LOG.warning("reload not supported")
Example #27
class BileanScheduler(object):
    """Billing scheduler based on apscheduler"""

    job_types = (
        NOTIFY, DAILY, FREEZE,
    ) = (
        'notify', 'daily', 'freeze',
    )
    trigger_types = (DATE, CRON) = ('date', 'cron')

    def __init__(self, **kwargs):
        super(BileanScheduler, self).__init__()
        self._scheduler = BackgroundScheduler()
        self.notifier = notifier.Notifier()
        self.engine_id = kwargs.get('engine_id', None)
        self.context = kwargs.get('context', None)
        if not self.context:
            self.context = bilean_context.get_admin_context()
        if cfg.CONF.bilean_task.store_ap_job:
            self._scheduler.add_jobstore(cfg.CONF.bilean_task.backend,
                                         url=cfg.CONF.bilean_task.connection)

    def init_scheduler(self):
        """Init all jobs related to the engine from db."""
        jobs = db_api.job_get_all(self.context, engine_id=self.engine_id)
        if not jobs:
            LOG.info(_LI("No job found from db"))
            return True
        for job in jobs:
            if self.is_exist(job.id):
                continue
            task_name = "_%s_task" % (job.job_type)
            task = getattr(self, task_name)
            trigger_type = self.CRON if job.job_type == self.DAILY else self.DATE
            self.add_job(task, job.id, trigger_type=trigger_type,
                         **job.parameters)

    def add_job(self, task, job_id, trigger_type='date', **kwargs):
        """Add a job to scheduler by given data.

        :param str|unicode user_id: used as job_id
        :param datetime alarm_time: when to first run the job

        """
        mg_time = cfg.CONF.bilean_task.misfire_grace_time
        job_time_zone = cfg.CONF.bilean_task.time_zone
        user_id = job_id.split('-')[1]
        if trigger_type == 'date':
            run_date = kwargs.get('run_date')
            if run_date is None:
                msg = "Param run_date cannot be None for trigger type 'date'."
                raise exception.InvalidInput(reason=msg)
            self._scheduler.add_job(task, 'date',
                                    timezone=job_time_zone,
                                    run_date=run_date,
                                    args=[user_id],
                                    id=job_id,
                                    misfire_grace_time=mg_time)
            return True

        # Add a cron type job
        hour = kwargs.get('hour', None)
        minute = kwargs.get('minute', None)
        if not hour or not minute:
            hour, minute = self._generate_timer()
        self._scheduler.add_job(task, 'cron',
                                timezone=job_time_zone,
                                hour=hour,
                                minute=minute,
                                args=[user_id],
                                id=job_id,
                                misfire_grace_time=mg_time)
        return True

    def modify_job(self, job_id, **changes):
        """Modifies the properties of a single job.

        Modifications are passed to this method as extra keyword arguments.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.modify_job(job_id, **changes)

    def remove_job(self, job_id):
        """Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.remove_job(job_id)

    def start(self):
        LOG.info(_('Starting Billing scheduler'))
        self._scheduler.start()

    def stop(self):
        LOG.info(_('Stopping Billing scheduler'))
        self._scheduler.shutdown()

    def is_exist(self, job_id):
        """Returns if the Job exists that matches the given ``job_id``.

        :param str|unicode job_id: the identifier of the job
        :return: True|False
        """

        job = self._scheduler.get_job(job_id)
        return job is not None

    def _notify_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        msg = {'user': user.id, 'notification': 'The balance is almost use up'}
        self.notifier.info('billing.notify', msg)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'notify'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))
        self._add_freeze_job(user)

    def _daily_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'daily'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _freeze_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'freeze'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _add_notify_job(self, user):
        if not user.rate:
            return False
        total_seconds = user['balance'] / user['rate']
        prior_notify_time = cfg.CONF.bilean_task.prior_notify_time * 3600
        notify_seconds = total_seconds - prior_notify_time
        notify_seconds = notify_seconds if notify_seconds > 0 else 0
        run_date = timeutils.utcnow() + timedelta(seconds=notify_seconds)
        job_params = {'run_date': run_date}
        job_id = self._generate_job_id(user['id'], self.NOTIFY)
        self.add_job(self._notify_task, job_id, **job_params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.NOTIFY,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)

    def _add_freeze_job(self, user):
        if not user.rate:
            return False
        total_seconds = user.balance / user.rate
        run_date = timeutils.utcnow() + timedelta(seconds=total_seconds)
        job_params = {'run_date': run_date}
        job_id = self._generate_job_id(user.id, self.FREEZE)
        self.add_job(self._freeze_task, job_id, **job_params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.FREEZE,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)
        return True

    def _add_daily_job(self, user):
        job_id = self._generate_job_id(user.id, self.DAILY)
        params = {'hour': random.randint(0, 23),
                  'minute': random.randint(0, 59)}
        self.add_job(self._daily_task, job_id, trigger_type='cron',
                     **params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.DAILY,
               'engine_id': self.engine_id,
               'parameters': params}
        db_api.job_create(self.context, job)
        return True

    def _delete_all_job(self, user):
        for job_type in self.job_types:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def update_user_job(self, user):
        """Update user's billing job"""
        if user.status not in [user.ACTIVE, user.WARNING]:
            self._delete_all_job(user)
            return

        for job_type in self.NOTIFY, self.FREEZE:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

        daily_job_id = self._generate_job_id(user.id, self.DAILY)
        if not self.is_exist(daily_job_id):
            self._add_daily_job(user)

        if user.status == user.ACTIVE:
            self._add_notify_job(user)
        else:
            self._add_freeze_job(user)

    def _generate_timer(self):
        """Generate a random timer include hour and minute."""
        hour = random.randint(0, 23)
        minute = random.randint(0, 59)
        return hour, minute

    def _generate_job_id(self, user_id, job_type):
        """Generate job id by given user_id and job type"""
        return "%s-%s" % (job_type, user_id)
Example #28
class ScheduleCommand(Command):
    schedule_sender = None
    def __init__(self, logger, message_sender):
        super().__init__(logger, message_sender)
        # Message scheduler configuration
        self.scheduler = BackgroundScheduler()
        configuration = self.message_sender.get_configuration()
        sdb_url = configuration.get('Message Scheduler', 'db_path')
        self.scheduler.add_jobstore('sqlalchemy', url=sdb_url)
        self.scheduler.start()
        ScheduleCommand.schedule_sender = message_sender

    def process(self, chat_id, user_id, username, arguments):
        if len(arguments) < 1:
            return self.help()
        
        operation = arguments[0] 
        command_arguments = arguments[1:]

        if operation == "add":
            return self.add_message(username, chat_id, command_arguments)
        elif operation == "time":
            return self.get_local_time()
        elif operation == "remove":
            return self.remove_message(username, command_arguments)
        else:
            return self.help()

    def get_local_time(self):
        date_str = str(datetime.today().strftime('%Y-%m-%d %H:%M:%S'))
        return "My time is {}".format(date_str)

    def add_message(self, username, chat_id, arguments):
        if len(arguments) < 2:
            return self.help()
        date = None
        message = None
        if arguments[0] == 'relative':
            if len(arguments) < 3:
                return self.help()
            date = self.on_delta_parse(arguments[1])
            message = arguments[2]
        else:
            date = self.on_date_parse(arguments[0])
            message = arguments[1]
        
        if message.startswith('"') and message.endswith('"'):
            message = message[1:-1]

        reference = self.user_reference(username)
        if date is None:
            return "{}Date format not recognized".format(reference)

        current_date = datetime.today()
        if date < current_date:
            current_str = self.get_human_string_date(current_date)
            return "{}Sorry, I can't travel to the past, my current date is: {}".format(reference, current_str)
 
        message_id = str(random.randrange(0, 999999999))
        self.scheduler.add_job(ScheduleCommand.send_programmed_message,
                               'date', run_date=date,
                               args=[username, chat_id, message],
                               id=message_id)
        
        date_str = self.get_human_string_date(date)
        return "{}The message [{}] has been successfully scheduled on {}"\
                .format(reference,  message_id, date_str)

    def send_programmed_message(username, chat_id, message):
        reference = ScheduleCommand.user_reference(username)
        text = "{}{}".format(reference, message)
        ScheduleCommand.schedule_sender.send_message(chat_id, text)    
    send_programmed_message = staticmethod(send_programmed_message)

    def remove_message(self, username, arguments):
        if len(arguments) < 1:
            return self.help()

        founds = 0
        ids = self.get_comma_arguments(arguments[0])

        for message_id in ids:
            job = self.scheduler.get_job(message_id)
            if job is None:
                continue
            if job.args[0] != username:
                continue
            self.scheduler.remove_job(message_id)
            founds += 1

        reference = self.user_reference(username)
        if founds == len(ids):
            if len(ids) == 1:
                return "{}The scheduled message was canceled.".format(reference)
            else:
                return "{}The scheduled messages were canceled.".format(reference)
        elif founds > 0:
            return "{}Some scheduled messages were canceled, but some others were not found or you aren't the owner.".format(reference)
        else:
            if len(ids) == 1:
                return "{}The scheduled message was not found or you aren't the owner.".format(reference)
            else:
                return "{}The scheduled messages were not found or you aren't the owner.".format(reference)

    def get_human_string_date(self,datetime):
        return str(datetime.strftime('%Y-%m-%d %H:%M:%S'))

    def on_delta_parse(self, text_date):
        regex = re.compile(r'^((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?$')
        parts = regex.match(text_date)
        if not parts:
            return None
        parts = parts.groupdict()
        time_params = {}
        valid_params = 0
        for (name, param) in parts.items():
            if param:
                time_params[name] = int(param)
                valid_params = valid_params + 1
        if valid_params == 0:
            return None
        return datetime.now() + timedelta(**time_params)

    def on_date_parse(self, text_date):
        try:
            return parser.parse(text_date, dayfirst = True, yearfirst = True)
        except ValueError:
            return None

    def user_reference(username):
        if username is None:
            return ""
        else:
            return "@{}: ".format(username)
    user_reference = staticmethod(user_reference)

    def help(self):
        self.logger.info("Printing help for schedule message command.")
        return self.get_file_help(__file__, "schedule_message.man")
    def name(self):
        return "schedule_message"
    
    def description(self):
        return "Schedule a message to be sent in a specified date."     
Example #29
class Trigger:

    def __init__(self, app):
        self.scheduler = None
        self.app = app

    def setup(self):
        self.scheduler = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'type': 'sqlalchemy',
                'url': self.app.config["TRIGGER_DATABASE_URL"] #os.environ.get('TRIGGER_DATABASE_URL')
            },
            'apscheduler.executors.processpool': {
                'type': 'processpool',
                'max_workers': '30'
            },
            'apscheduler.job_defaults.coalesce': 'false',
            'apscheduler.job_defaults.max_instances': '20',
            'apscheduler.timezone': 'UTC',
        })

    def start(self):

        self.scheduler.start()

    def is_running(self):
        return self.scheduler.running

    def shutdown(self):
        self.scheduler.shutdown()

    def load_job_list(self):
        with self.app.app_context():
            projects = AutoProject.query.all()
            # key_list = ("minute", "hour", "day", "month", "day_of_week")

            for p in projects:
                if p.enable and self.scheduler.get_job(str(p.id)) is None:
                    cron = p.cron.replace("\n", "").strip().split(" ")
                    #print(cron)
                    if len(cron) < 5:
                        continue
                    j = self.scheduler.add_job(func=run_job, trigger='cron', name=p.name, replace_existing=True,
                                               minute=cron[0], hour=cron[1], day=cron[2], month=cron[3], day_of_week=cron[4],
                                               id="%s" % p.id, args=(p.id,))
                else:
                    self.update_job(p.id)

    def add_job(self, func, name, id, cron):
        if self.scheduler.get_job(id) is None:
            self.scheduler.add_job(func=func, trigger='cron', name=name,
                                   minute=cron[0],
                                   hour=cron[1],
                                   day=cron[2],
                                   month=cron[3],
                                   day_of_week=cron[4],
                                   id="%s" % id)

    def update_job(self, id):
        with self.app.app_context():
            p = AutoProject.query.filter_by(id=id).first()
            if p and p.enable:
                cron = p.cron.replace("\n", "").strip().split(" ")
                if len(cron) < 5:
                    return False
                print(self.scheduler.get_job(str(id)))

                if self.scheduler.get_job(str(id)) is None:
                    self.scheduler.add_job(func=run_job, trigger='cron', name=p.name,
                                           minute=cron[0], hour=cron[1], day=cron[2], month=cron[3], day_of_week=cron[4],
                                           id="%s" % id, args=(id,))
                else:
                    self.remove_job(id)
                    self.scheduler.add_job(func=run_job, trigger='cron', name=p.name,
                                           minute=cron[0], hour=cron[1], day=cron[2], month=cron[3], day_of_week=cron[4],
                                           id="%s" % id, args=(id,))

            return True

    def remove_job(self, id):
        if self.scheduler.get_job(str(id)) is not None:  # jobs are stored with string ids
            self.scheduler.remove_job(str(id))

    def pause_job(self, id):
        pass

    def resume_job(self, id):
        pass

    def get_jobs(self):
        to_zone = tz.gettz("CST")
        #jobs = self.scheduler.get_jobs()
        urls = {
            "pass": "******",
            "fail": "fail.png",
            "running": "run.gif",
            "none": "project.png"
        }
        projects = AutoProject.query.order_by(AutoProject.id.desc()).all()
        data = {"total": len(projects), "rows": []}

        for p in projects:
            next_run_time = "scheduler not started"
            status = "pass"
            job = self.scheduler.get_job(str(p.id))  # jobs are stored with string ids
            if job is not None:
                next_run_time = job.next_run_time.astimezone(to_zone).strftime("%Y-%m-%d %H:%M:%S")

                # look up the status of this job's most recent run
                task = AutoTask.query.filter_by(project_id=job.id).order_by(AutoTask.build_no.desc()).first()
                if task is not None:
                    output_dir = os.getcwd() + "/logs/%s/%s" % (task.project_id, task.build_no)
                    if os.path.exists(output_dir + "/report.html"):
                        tree = ET.parse(output_dir + "/output.xml")
                        root = tree.getroot()
                        # passed = root.find("./statistics/suite/stat").attrib["pass"]
                        fail = root.find("./statistics/suite/stat").attrib["fail"]
                        if int(fail) != 0:
                            status = 'fail'
                        else:
                            status = 'pass'
            else:
                status = "none"

            data["rows"].append({"id": "%s" % p.id,
                                 "name": p.name,
                                 "enable": p.enable,
                                 "status": status,
                                 "url": url_for('static', filename='images/%s' % urls[status]),
                                 "cron": p.cron,
                                 "next_run_time": next_run_time
                                 })

        """
        for job in jobs:
            status = "running"
            task = AutoTask.query.filter_by(project_id=job.id).order_by(AutoTask.build_no.desc()).first()
            if task is None:
                continue

            output_dir = os.getcwd() + "/logs/%s/%s" % (task.project_id, task.build_no)
            if os.path.exists(output_dir + "/report.html"):
                tree = ET.parse(output_dir + "/output.xml")
                root = tree.getroot()
                #passed = root.find("./statistics/suite/stat").attrib["pass"]
                fail = root.find("./statistics/suite/stat").attrib["fail"]
                if int(fail) != 0:
                    status = 'fail'
                else:
                    status = 'pass'

            data["rows"].append({"id": "%s" % job.id,
                                 "name": job.name,
                                 "status": status,
                                 "url": url_for('static', filename='images/%s' % urls[status]),
                                 "cron": AutoProject.query.filter_by(id=job.id).first().cron,
                                 "next_run_time": job.next_run_time.astimezone(to_zone).strftime("%Y-%m-%d %H:%M:%S")
                                 })
        """

        return data

    def print_jobs(self):
        pass
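
load_job_list above maps a five-field crontab string straight onto APScheduler's cron keyword arguments. A minimal standalone sketch of that mapping (run_job and the id are placeholders):

from apscheduler.schedulers.background import BackgroundScheduler

def run_job(project_id):
    print('running project', project_id)

cron = '*/5 8-18 * * 1-5'.split()  # minute hour day month day_of_week
scheduler = BackgroundScheduler()
scheduler.add_job(run_job, trigger='cron',
                  minute=cron[0], hour=cron[1], day=cron[2],
                  month=cron[3], day_of_week=cron[4],
                  id='42', args=(42,))
scheduler.start()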
Esempio n. 30
0
class Scheduler:
    timezone = settings.TIME_ZONE

    def __init__(self):
        self.scheduler = BackgroundScheduler(timezone=self.timezone)
        self.scheduler.add_listener(
            self._handle_event,
            EVENT_SCHEDULER_SHUTDOWN | EVENT_JOB_ERROR | EVENT_JOB_MAX_INSTANCES | EVENT_JOB_EXECUTED)

    def _record_alarm(self, obj, status):
        duration = seconds_to_human(time.time() - obj.latest_fault_time)
        Alarm.objects.create(
            name=obj.name,
            type=obj.get_type_display(),
            status=status,
            duration=duration,
            notify_grp=obj.notify_grp,
            notify_mode=obj.notify_mode)

    def _do_notify(self, event, obj):
        grp = json.loads(obj.notify_grp)
        for mode in json.loads(obj.notify_mode):
            if mode == '1':
                spug.notify_by_wx(event, obj.name, grp)
            elif mode == '3':
                spug.notify_by_dd(event, obj.name, grp)
            elif mode == '4':
                spug.notify_by_email(event, obj.name, grp)

    def _handle_notify(self, obj, old_status):
        if obj.latest_status == 0:
            if old_status == 1:
                self._record_alarm(obj, '2')
                logger.info(f'{human_datetime()} recover job_id: {obj.id}')
                self._do_notify('2', obj)
        else:
            if obj.fault_times >= obj.threshold:
                if time.time() - obj.latest_notify_time >= obj.quiet * 60:
                    obj.latest_notify_time = int(time.time())
                    obj.save()
                    self._record_alarm(obj, '1')
                    logger.info(f'{human_datetime()} notify job_id: {obj.id}')
                    self._do_notify('1', obj)

    def _handle_event(self, event):
        close_old_connections()
        obj = SimpleLazyObject(lambda: Detection.objects.filter(pk=event.job_id).first())
        if event.code == EVENT_SCHEDULER_SHUTDOWN:
            logger.info(f'EVENT_SCHEDULER_SHUTDOWN: {event}')
            Notify.make_notify('monitor', '1', 'Scheduler shut down', 'The scheduler shut down unexpectedly; you can file an issue on GitHub', False)
        elif event.code == EVENT_JOB_MAX_INSTANCES:
            logger.info(f'EVENT_JOB_MAX_INSTANCES: {event}')
            Notify.make_notify('monitor', '1', f'{obj.name} - max job instances reached', 'Usually the previous run has not finished yet; increase the interval or shorten the task')
        elif event.code == EVENT_JOB_ERROR:
            logger.info(f'EVENT_JOB_ERROR: job_id {event.job_id} exception: {event.exception}')
            Notify.make_notify('monitor', '1', f'{obj.name} - execution error', f'{event.exception}')
        elif event.code == EVENT_JOB_EXECUTED:
            obj = Detection.objects.filter(pk=event.job_id).first()
            old_status = obj.latest_status
            obj.latest_status = 0 if event.retval else 1
            obj.latest_run_time = human_datetime(event.scheduled_run_time)
            if old_status in [0, None] and event.retval is False:
                obj.latest_fault_time = int(time.time())
            if obj.latest_status == 0:
                obj.latest_notify_time = 0
                obj.fault_times = 0
            else:
                obj.fault_times += 1
            obj.save()
            self._handle_notify(obj, old_status)

    def _init(self):
        self.scheduler.start()
        for item in Detection.objects.filter(is_active=True):
            trigger = IntervalTrigger(minutes=int(item.rate), timezone=self.timezone)
            self.scheduler.add_job(
                dispatch,
                trigger,
                id=str(item.id),
                args=(item.type, item.addr, item.extra),
            )

    def run(self):
        rds_cli = get_redis_connection()
        self._init()
        rds_cli.delete(settings.MONITOR_KEY)
        logger.info('Running monitor')
        while True:
            _, data = rds_cli.blpop(settings.MONITOR_KEY)
            task = AttrDict(json.loads(data))
            if task.action in ('add', 'modify'):
                trigger = IntervalTrigger(minutes=int(task.rate), timezone=self.timezone)
                self.scheduler.add_job(
                    dispatch,
                    trigger,
                    id=str(task.id),
                    args=(task.type, task.addr, task.extra),
                    replace_existing=True
                )
            elif task.action == 'remove':
                job = self.scheduler.get_job(str(task.id))
                if job:
                    job.remove()
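
The run loop above implements a small redis-backed control channel: other processes push a JSON action onto settings.MONITOR_KEY and the loop applies it to the live scheduler. A hypothetical producer-side sketch (the key name and field values are placeholders, not taken from the project):

import json
import redis

rds = redis.Redis()
# rpush + blpop gives FIFO ordering; the key must match settings.MONITOR_KEY
rds.rpush('monitor:key', json.dumps({
    'action': 'add',          # or 'modify' / 'remove'
    'id': 7,
    'rate': 5,                # minutes between runs
    'type': 'ping', 'addr': '10.0.0.1', 'extra': '',
}))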
Esempio n. 31
0
class VolatileDictionary(dict):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)  # do not pass self again; super() already binds it
        self._scheduler = BackgroundScheduler()
        self._scheduler.start()

        self._evaporation_jobs = {}

    def is_set_volatile(self, key):
        return key in self._evaporation_jobs

    def __setitem__(self, key, value, t=None):
        if is_key_time_tuple(key):
            key, t = key
        super().__setitem__(key, value)
        if t is not None:
            self._schedule_evaporation(key, t)

    def __delitem__(self, key):
        if key in self._evaporation_jobs:
            self.cancel_volatility(key)

        super().__delitem__(key)

    def _schedule_evaporation(self, key, t):
        date = datetime.now() + timedelta(seconds=t)
        job = self._scheduler.add_job(self._evaporate,
                                      'date',
                                      run_date=date,
                                      args=(key, ))
        self._evaporation_jobs[key] = job.id

    def _evaporate(self, key):
        super().__delitem__(key)
        del self._evaporation_jobs[key]

    def cancel_volatility(self, key):
        if not self.is_set_volatile(key):
            raise NonvolatileTypeError(key)
        self._scheduler.remove_job(self._evaporation_jobs[key])
        del self._evaporation_jobs[key]

    def get_set_lifetime(self, key):
        if not self.is_set_volatile(key):
            raise NonvolatileTypeError(key)

        job_id = self._evaporation_jobs[key]
        job = self._scheduler.get_job(job_id)
        job_date = job.trigger.run_date
        return (job_date - datetime.now(job_date.tzinfo)).total_seconds()

    def volatile_keys(self):
        return [key for key in self if self.is_set_volatile(key)]

    def nonvolatile_keys(self):
        return [key for key in self if not self.is_set_volatile(key)]

    def volatile_values(self):
        return [self[key] for key in self.volatile_keys()]

    def nonvolatile_values(self):
        return [self[key] for key in self.nonvolatile_keys()]

    def volatile_items(self):
        return [(key, self[key]) for key in self.volatile_keys()]

    def nonvolatile_items(self):
        return [(key, self[key]) for key in self.nonvolatile_keys()]

    def __str__(self):
        volatile_sets = self.volatile_items()
        nonvolatile_sets = self.nonvolatile_items()

        string = ''
        if len(volatile_sets) > 0:
            string += 'Volatile sets:\n'
            for key, value in volatile_sets:
                t = self.get_set_lifetime(key)
                string += '\t{}: {} [{}s]\n'.format(key, value, t)
            string += '\n'
        if len(nonvolatile_sets) > 0:
            string += 'Nonvolatile sets:\n'
            for key, value in nonvolatile_sets:
                string += '\t{}: {}\n'.format(key, value)
        return string
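
A hypothetical usage sketch, assuming the external helper is_key_time_tuple(key) returns True for a (key, seconds) 2-tuple (it is not defined in this example):

d = VolatileDictionary()
d['session', 30] = 'token'        # volatile: evaporates after 30 seconds
d['config'] = {'retries': 3}      # plain nonvolatile entry
print(d.is_set_volatile('session'))          # True
print(round(d.get_set_lifetime('session')))  # ~30 seconds remaining
print(d.volatile_keys(), d.nonvolatile_keys())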
Esempio n. 32
0
class JobManager(object):
    def __init__(self):
        self.scheduler = BackgroundScheduler(executors=EXECUTORS,
                                             job_defaults=JOB_DEFAULTS,
                                             timezone='Asia/Shanghai')
        self.jobs = {}
        self.scheduler.start()

    def add_job_store(self):
        pass

    def add_job(self, method, jobtype, trigger, jobid, args=None, kwargs=None):
        job = None
        if jobtype == 'interval':
            job = self.scheduler.add_job(method,
                                         'interval',
                                         seconds=trigger,
                                         id=jobid,
                                         args=args,
                                         kwargs=kwargs)

        if jobtype == 'cron':
            cron_trigger = CronTrigger(**trigger)
            job = self.scheduler.add_job(method,
                                         cron_trigger,
                                         id=jobid,
                                         args=args,
                                         kwargs=kwargs)

        if jobtype == 'once':
            job = self.scheduler.add_job(method,
                                         'date',
                                         run_date=trigger,
                                         id=jobid,
                                         args=args,
                                         kwargs=kwargs)

        LOG.debug("add job: %s", jobid)
        if job is not None:
            self.jobs[jobid] = job

    def delete_job(self, jobid):
        if jobid in self.jobs:
            self.scheduler.remove_job(jobid)
            del self.jobs[jobid]

    def disable_job(self, jobid):
        if jobid in self.jobs:
            self.scheduler.pause_job(jobid)

    def enable_job(self, jobid):
        if jobid in self.jobs:
            self.scheduler.resume_job(jobid)

    def get_job_next_run_time(self, jobid):
        job = self.scheduler.get_job(jobid)

        if not job or not job.next_run_time:  # next_run_time is None for paused jobs
            return None

        return job.next_run_time.strftime("%Y-%m-%d %H:%M:%S")
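
A hypothetical usage sketch (EXECUTORS and JOB_DEFAULTS are module-level settings in the original source; tick is a placeholder job function):

def tick():
    print('tick')

manager = JobManager()
manager.add_job(tick, 'interval', 10, 'every-10s')                   # trigger: seconds
manager.add_job(tick, 'cron', {'hour': 0, 'minute': 30}, 'nightly')  # trigger: CronTrigger kwargs
manager.add_job(tick, 'once', '2030-01-01 09:00:00', 'one-shot')     # trigger: run date
print(manager.get_job_next_run_time('every-10s'))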
Esempio n. 34
0
class OnionooManager():
    def __init__(self, proxy: Proxy = None):

        self.log = logging.getLogger('theonionbox')

        self.proxy = proxy
        self.documents = {}

        executors = {'default': ThreadPoolExecutor(50)}
        job_defaults = {'coalesce': True, 'max_instances': 10}
        self.scheduler = BackgroundScheduler(logger=self.log,
                                             executors=executors,
                                             job_defaults=job_defaults)
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()

    def query(self, update: Document, document: OnionooDocument,
              fingerprint: str):

        # https://trac.torproject.org/projects/tor/ticket/6320
        hash = hashlib.sha1(a2b_hex(fingerprint)).hexdigest()
        payload = {'lookup': hash}

        headers = {'accept-encoding': 'gzip'}
        if len(update.ifModSince) > 0:
            headers['if-modified-since'] = update.ifModSince

        proxy_address = self.proxy.address() if self.proxy is not None else None

        if proxy_address is None:
            proxies = {}
            query_base = ONIONOO_OPEN
        else:
            proxies = {
                'http': 'socks5h://' + proxy_address,
                'https': 'socks5h://' + proxy_address
            }
            query_base = ONIONOO_HIDDEN[randint(0, len(ONIONOO_HIDDEN) - 1)]

        query_address = '{}/{}'.format(query_base, document.value)

        r = None

        self.log.debug(
            f"OoM|{fingerprint[:6]}: Launching query @ '{query_address}'.")

        try:
            r = requests.get(query_address,
                             params=payload,
                             headers=headers,
                             proxies=proxies,
                             timeout=10)
        except requests.exceptions.ConnectTimeout:
            self.log.info(
                f"OoM|{fingerprint[:6]}/{document.value}: Connection timeout @ '{query_base}'."
            )
            # We'll try again next time...
        except Exception as exc:
            self.log.warning(
                f"OoM|{fingerprint[:6]}: Failed querying '{query_address}' -> {exc}."
            )
        else:
            self.log.debug(
                f"OoM|{fingerprint[:6]}/{document.value}: {r.status_code}; {len(r.text)} chars received."
            )

        if r is None:
            return

        update.ifModSince = r.headers.get('last-modified', update.ifModSince)  # header may be absent
        if r.status_code != requests.codes.ok:
            return

        try:
            data = r.json()
        except Exception as exc:
            self.log.debug(
                f"OoM|{fingerprint[:6]}/{document.value}: Failed to un-json network data -> {exc}."
            )
            return

        update.update(data)

    def register(self, fingerprint: str) -> OnionooData:

        self.log.debug(f'Registering OoM|{fingerprint[:6]}.')

        documents = {}

        for d in OnionooDocument:
            hash = hashlib.sha256(f'{fingerprint.lower()}|{d.value}'.encode(
                'UTF-8')).hexdigest()

            if hash not in self.documents:
                self.documents[hash] = Document()

                self.scheduler.add_job(
                    self.query,
                    trigger='interval',
                    hours=2,
                    jitter=3600,
                    next_run_time=datetime.now(),  # to trigger once immediately!
                    kwargs={
                        'update': self.documents[hash],
                        'document': d,
                        'fingerprint': fingerprint
                    },
                    id=hash)

            documents[d] = self.documents[hash]

        return OnionooData(documents)

    def trigger(self, fingerprint: str):

        for d in OnionooDocument:
            hash = hashlib.sha256(f'{fingerprint.lower()}|{d.value}'.encode(
                'UTF-8')).hexdigest()
            with contextlib.suppress(Exception):
                self.scheduler.get_job(job_id=hash).modify(
                    next_run_time=datetime.now())
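
register() above combines an interval trigger, a jitter, and next_run_time=datetime.now() so every document is fetched once immediately and then roughly every two hours. Reduced to a minimal sketch (fetch and the id are placeholders):

from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def fetch():
    print('querying onionoo...')

scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(fetch, trigger='interval', hours=2, jitter=3600,
                  next_run_time=datetime.now(),  # first run fires immediately
                  id='fetch-details')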
Esempio n. 35
0
class Scheduler(object):
    def __init__(self, tasks, settings):
        self.settings = settings
        self.logger = get_logger('scheduler', self.settings)

        self.intervals = self.settings['scheduler']['intervals']
        self.apscheduler = { 'apscheduler': self.settings['scheduler']['apscheduler'] }

        if not isinstance(tasks, dict):
            self.logger.error('tasks is not a dictionary')
            return

        if not isinstance(self.intervals, dict):
            self.logger.error('intervals is not a dictionary')
            return

        self.tasks = self._flatten_dict(tasks, '')

        self.logger.debug('Tasks found:')
        self.logger.debug(LINE_SPLITTER)
        for key in self.tasks:
            self.logger.debug('%45s %30s' % (key, self.tasks[key].task_type))
        self.logger.debug(LINE_SPLITTER)

        #self.logger.debug('Checking tasks paths!')
        # TODO: Check if paths are valid

    def init(self):
        """ Initializes the queue, and adds the tasks """

        self.logger.info('Initializing APScheduler...')

        apsched_kwargs = self._flatten_dict(self.apscheduler, '')
        apsched_kwargs['apscheduler.logger'] = get_logger('apscheduler', self.settings)

        self.sched = BackgroundScheduler(apsched_kwargs)

        for (id, task) in self.tasks.items():
            task_type = task.task_type

            self.logger.debug('Adding task "%s" [%s]' % (id, task_type))

            if task_type not in self.intervals:
                self.logger.info('Interval not defined for "%s" class. Assuming it is a one-time task' % task_type)
                self.add_task(id, task)
                continue

            self.add_task(id, task, self.intervals[task_type])

        self.logger.info('APScheduler initialized!')

    def clear(self):
        """ Removes all jobs from scheduler """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.error('Scheduler is not initialized')
            return

        for job in self.sched.get_jobs():
            job.remove()

    def start(self):
        """ Start the scheduler by starting the instance of APScheduler """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.error('Scheduler is not initialized')
            return

        try:
            self.sched.start()
        except SchedulerAlreadyRunningError as e:
            self.logger.warning(e)


    def stop(self, wait=True):
        """ Stop the scheduler. If wait=True, it is stopped only after all
            currently executing jobs have finished """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.warning('Scheduler is not initialized')
            return

        try:
            self.sched.shutdown(wait=wait)
        except SchedulerNotRunningError as e:
            self.logger.warning(e)

    def add_task(self, id, func, interval=None):
        """ Adds a new task into the queue. If interval is None then the task
            will be executed once. """
        if not isinstance(id, basestring):
            self.logger.error('"id" argument is not an instance of basestring')
            return

        if not hasattr(func, '__call__'):
            self.logger.error('"func" is not callable')
            return

        try:
            if isinstance(interval, dict):
                self.sched.add_job(func, trigger='interval', id=id, **interval)
            elif interval is None:  # Run once (omit trigger)
                self.sched.add_job(func, id=id)
            else:
                self.logger.error('"interval" is not an instance of [time|None]')
                return
        except ConflictingIdError as e:
            self.logger.warning(e)

    def remove_task(self, id):
        """ Remove a job from the queue """
        if not isinstance(id, basestring):
            self.logger.error('"id" argument is not an instance of basestring')
            return

        try:
            self.sched.remove_job(id)
        except JobLookupError as e:
            self.logger.warning(e)

    def force_update(self, job_id=None):
        """ Updates a job with id == job_id, or all jobs if no id is given """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.warning('Scheduler is not initialized')
            return

        if not job_id:
            self.logger.info("Forcing update of all jobs")
            for job in self.sched.get_jobs():
                self.__run_job(job)
        else:
            self.logger.info("Forcing update of job %s" % job_id)
            job = self.sched.get_job(job_id)

            if not job:
                self.logger.warn("Job %s not found" % job_id)
            else:
                self.__run_job(job)

    def __run_job(self, job):
        if job.func:
            # Add the job to the scheduler and run it just once
            self.sched.add_job(job.func)

            # If we explicitly call job.func() then we block the thread and we get multiple
            # missed executions from apscheduler
            # job.func()
        else:
            self.logger.warn("Job %s has a None type callable func" % job.id)

    def _flatten_dict(self, d, path):
        new_dict = { }
        for key in d:
            if isinstance(d[key], dict):
                new_path = '%s.%s' % (path, key) if path else key

                x = self._flatten_dict(d[key],new_path).copy()
                new_dict.update(x)
            else:
                new_key = '%s.%s' % (path, key) if path else key
                new_dict[new_key] = d[key]

        return new_dict
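
_flatten_dict produces the dotted-key form that APScheduler accepts as its gconfig mapping. A standalone sketch of the same flattening (flatten is a hypothetical free-function copy of the method):

def flatten(d, path=''):
    out = {}
    for key, value in d.items():
        new_path = '%s.%s' % (path, key) if path else key
        if isinstance(value, dict):
            out.update(flatten(value, new_path))
        else:
            out[new_path] = value
    return out

cfg = {'apscheduler': {'job_defaults': {'coalesce': False}, 'timezone': 'UTC'}}
print(flatten(cfg))
# {'apscheduler.job_defaults.coalesce': False, 'apscheduler.timezone': 'UTC'}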
Esempio n. 36
0
class Core(object):
    def __init__(self):
        self.started = False
        self.daemon = None
        self.io_loop = IOLoop().instance()
        self.pid = os.getpid()

        self.tz = tz.tzlocal()

        self.config_file = None
        self.data_dir = None
        self.cache_dir = None
        self.quite = None
        self.no_launch = None
        self.web_port = None
        self.developer = None
        self.debug = None
        self.newest_version = None
        self.newest_version_string = None

        self.user_agent = 'SiCKRAGE.CE.1/({};{};{})'.format(platform.system(), platform.release(), str(uuid.uuid1()))
        self.sys_encoding = get_sys_encoding()
        self.languages = [language for language in os.listdir(sickrage.LOCALE_DIR) if '_' in language]
        self.showlist = []

        self.api = None
        self.adba_connection = None
        self.notifier_providers = None
        self.metadata_providers = None
        self.search_providers = None
        self.log = None
        self.config = None
        self.alerts = None
        self.main_db = None
        self.cache_db = None
        self.failed_db = None
        self.scheduler = None
        self.wserver = None
        self.wsession = None
        self.google_auth = None
        self.name_cache = None
        self.show_queue = None
        self.search_queue = None
        self.postprocessor_queue = None
        self.version_updater = None
        self.show_updater = None
        self.daily_searcher = None
        self.backlog_searcher = None
        self.proper_searcher = None
        self.trakt_searcher = None
        self.subtitle_searcher = None
        self.auto_postprocessor = None

        # patch modules with encoding kludge
        patch_modules()

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # init core classes
        self.notifier_providers = NotifierProviders()
        self.metadata_providers = MetadataProviders()
        self.search_providers = SearchProviders()
        self.log = Logger()
        self.config = Config()
        self.api = API()
        self.alerts = Notifications()
        self.main_db = MainDB()
        self.cache_db = CacheDB()
        self.failed_db = FailedDB()
        self.scheduler = BackgroundScheduler()
        self.wserver = WebServer()
        self.wsession = WebSession()
        self.google_auth = GoogleAuth()
        self.name_cache = NameCache()
        self.show_queue = ShowQueue()
        self.search_queue = SearchQueue()
        self.postprocessor_queue = PostProcessorQueue()
        self.version_updater = VersionUpdater()
        self.show_updater = ShowUpdater()
        self.daily_searcher = DailySearcher()
        self.backlog_searcher = BacklogSearcher()
        self.proper_searcher = ProperSearcher()
        self.trakt_searcher = TraktSearcher()
        self.subtitle_searcher = SubtitleSearcher()
        self.auto_postprocessor = AutoPostProcessor()


        # Check if we need to perform a restore first
        if os.path.exists(os.path.abspath(os.path.join(self.data_dir, 'restore'))):
            success = restoreSR(os.path.abspath(os.path.join(self.data_dir, 'restore')), self.data_dir)
            print("Restoring SiCKRAGE backup: %s!\n" % ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(os.path.join(self.data_dir, 'restore')), ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db'))):
            if os.path.isfile(os.path.join(self.data_dir, 'sickrage.db')):
                helpers.moveFile(os.path.join(self.data_dir, 'sickrage.db'),
                                 os.path.join(self.data_dir, '{}.bak-{}'
                                              .format('sickrage.db',
                                                      datetime.datetime.now().strftime(
                                                          '%Y%m%d_%H%M%S'))))

            helpers.moveFile(os.path.abspath(os.path.join(self.data_dir, 'sickbeard.db')),
                             os.path.abspath(os.path.join(self.data_dir, 'sickrage.db')))

        # load config
        self.config.load()

        # set language
        self.config.change_gui_lang(self.config.gui_lang)

        # set socket timeout
        socket.setdefaulttimeout(self.config.socket_timeout)

        # setup logger settings
        self.log.logSize = self.config.log_size
        self.log.logNr = self.config.log_nr
        self.log.logFile = os.path.join(self.data_dir, 'logs', 'sickrage.log')
        self.log.debugLogging = self.config.debug
        self.log.consoleLogging = not self.quite

        # start logger
        self.log.start()

        # user agent
        if self.config.random_user_agent:
            self.user_agent = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.user_agent

        # Check available space
        try:
            total_space, available_space = getFreeSpace(self.data_dir)
            if available_space < 100:
                self.log.error('Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data '
                               'otherwise. Only %sMB left', available_space)
                return
        except Exception:
            self.log.error('Failed getting diskspace: %s', traceback.format_exc())

        # perform database startup actions
        for db in [self.main_db, self.cache_db, self.failed_db]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not sickrage.app.developer and self.config.last_db_compact < time.time() - 604800:  # 7 days
            self.main_db.compact()
            self.config.last_db_compact = int(time.time())

        # load name cache
        self.name_cache.load()

        # load data for shows from database
        self.load_shows()

        if self.config.default_page not in ('home', 'schedule', 'history', 'news', 'IRC'):
            self.config.default_page = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.app.cache_dir, folder), ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.config.use_anidb:
            def anidb_logger(msg):
                return self.log.debug("AniDB: {} ".format(msg))

            try:
                self.adba_connection = adba.Connection(keepAlive=True, log=anidb_logger)
                self.adba_connection.auth(self.config.anidb_username, self.config.anidb_password)
            except Exception as e:
                self.log.warning("AniDB exception msg: %r " % repr(e))

        if self.config.web_port < 21 or self.config.web_port > 65535:
            self.config.web_port = 8081

        if not self.config.web_cookie_secret:
            self.config.web_cookie_secret = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.config.anon_redirect.endswith('?'):
            self.config.anon_redirect = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.config.root_dirs):
            self.config.root_dirs = ''

        self.config.naming_force_folders = check_force_season_folders()

        if self.config.nzb_method not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.config.nzb_method = 'blackhole'

        if self.config.torrent_method not in ('blackhole', 'utorrent', 'transmission', 'deluge', 'deluged',
                                              'download_station', 'rtorrent', 'qbittorrent', 'mlnet', 'putio'):
            self.config.torrent_method = 'blackhole'

        if self.config.autopostprocessor_freq < self.config.min_autopostprocessor_freq:
            self.config.autopostprocessor_freq = self.config.min_autopostprocessor_freq
        if self.config.daily_searcher_freq < self.config.min_daily_searcher_freq:
            self.config.daily_searcher_freq = self.config.min_daily_searcher_freq
        self.config.min_backlog_searcher_freq = self.backlog_searcher.get_backlog_cycle_time()
        if self.config.backlog_searcher_freq < self.config.min_backlog_searcher_freq:
            self.config.backlog_searcher_freq = self.config.min_backlog_searcher_freq
        if self.config.version_updater_freq < self.config.min_version_updater_freq:
            self.config.version_updater_freq = self.config.min_version_updater_freq
        if self.config.subtitle_searcher_freq < self.config.min_subtitle_searcher_freq:
            self.config.subtitle_searcher_freq = self.config.min_subtitle_searcher_freq
        if self.config.proper_searcher_interval not in ('15m', '45m', '90m', '4h', 'daily'):
            self.config.proper_searcher_interval = 'daily'
        if self.config.showupdate_hour < 0 or self.config.showupdate_hour > 23:
            self.config.showupdate_hour = 0
        if self.config.subtitles_languages and self.config.subtitles_languages[0] == '':
            self.config.subtitles_languages = []

        # add version checker job
        self.scheduler.add_job(
            self.version_updater.run,
            IntervalTrigger(
                hours=self.config.version_updater_freq
            ),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER"
        )

        # add network timezones updater job
        self.scheduler.add_job(
            update_network_dict,
            IntervalTrigger(
                days=1
            ),
            name="TZUPDATER",
            id="TZUPDATER"
        )

        # add show updater job
        self.scheduler.add_job(
            self.show_updater.run,
            IntervalTrigger(
                days=1,
                start_date=datetime.datetime.now().replace(hour=self.config.showupdate_hour)
            ),
            name="SHOWUPDATER",
            id="SHOWUPDATER"
        )

        # add daily search job
        self.scheduler.add_job(
            self.daily_searcher.run,
            IntervalTrigger(
                minutes=self.config.daily_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=4)
            ),
            name="DAILYSEARCHER",
            id="DAILYSEARCHER"
        )

        # add backlog search job
        self.scheduler.add_job(
            self.backlog_searcher.run,
            IntervalTrigger(
                minutes=self.config.backlog_searcher_freq,
                start_date=datetime.datetime.now() + datetime.timedelta(minutes=30)
            ),
            name="BACKLOG",
            id="BACKLOG"
        )

        # add auto-postprocessing job
        self.scheduler.add_job(
            self.auto_postprocessor.run,
            IntervalTrigger(
                minutes=self.config.autopostprocessor_freq
            ),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR"
        )

        # add find proper job
        self.scheduler.add_job(
            self.proper_searcher.run,
            IntervalTrigger(
                minutes={'15m': 15, '45m': 45, '90m': 90, '4h': 4 * 60, 'daily': 24 * 60}[
                    self.config.proper_searcher_interval]
            ),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER"
        )

        # add trakt.tv checker job
        self.scheduler.add_job(
            self.trakt_searcher.run,
            IntervalTrigger(
                hours=1
            ),
            name="TRAKTSEARCHER",
            id="TRAKTSEARCHER"
        )

        # add subtitles finder job
        self.scheduler.add_job(
            self.subtitle_searcher.run,
            IntervalTrigger(
                hours=self.config.subtitle_searcher_freq
            ),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER"
        )

        # start scheduler service
        self.scheduler.start()

        # Pause/Resume PROPERSEARCHER job: the tuple holds (pause, resume) and the
        # boolean config flag picks which bound method to call
        (self.scheduler.get_job('PROPERSEARCHER').pause,
         self.scheduler.get_job('PROPERSEARCHER').resume
         )[self.config.download_propers]()

        # Pause/Resume TRAKTSEARCHER job
        (self.scheduler.get_job('TRAKTSEARCHER').pause,
         self.scheduler.get_job('TRAKTSEARCHER').resume
         )[self.config.use_trakt]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.scheduler.get_job('SUBTITLESEARCHER').pause,
         self.scheduler.get_job('SUBTITLESEARCHER').resume
         )[self.config.use_subtitles]()

        # Pause/Resume POSTPROCESS job
        (self.scheduler.get_job('POSTPROCESSOR').pause,
         self.scheduler.get_job('POSTPROCESSOR').resume
         )[self.config.process_automatically]()

        # start queue's
        self.search_queue.start()
        self.show_queue.start()
        self.postprocessor_queue.start()

        # start webserver
        self.wserver.start()

    def shutdown(self, restart=False):
        if self.started:
            self.log.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            self.wserver.shutdown()

            # shutdown show queue
            if self.show_queue:
                self.log.debug("Shutting down show queue")
                self.show_queue.shutdown()
                del self.show_queue

            # shutdown search queue
            if self.search_queue:
                self.log.debug("Shutting down search queue")
                self.search_queue.shutdown()
                del self.search_queue

            # shutdown post-processor queue
            if self.postprocessor_queue:
                self.log.debug("Shutting down post-processor queue")
                self.postprocessor_queue.shutdown()
                del self.postprocessor_queue

            # log out of ADBA
            if self.adba_connection:
                self.log.debug("Shutting down ANIDB connection")
                self.adba_connection.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.main_db, self.cache_db, self.failed_db]:
                if db.opened:
                    self.log.debug("Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            self.log.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)

        if sickrage.app.daemon:
            sickrage.app.daemon.stop()

        self.started = False

    def save_all(self):
        # write all shows
        self.log.info("Saving all shows to the database")
        for show in self.showlist:
            try:
                show.saveToDB()
            except Exception:
                continue

        # save config
        self.config.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for dbData in [x['doc'] for x in self.main_db.db.all('tv_shows', with_doc=True)]:
            try:
                self.log.debug("Loading data for show: [{}]".format(dbData['show_name']))
                show = TVShow(int(dbData['indexer']), int(dbData['indexer_id']))
                show.nextEpisode()
                self.showlist += [show]
            except Exception as e:
                self.log.error("Show error in [%s]: %s" % (dbData['location'], e.message))
Esempio n. 37
0
class Scheduler:
    timezone = settings.TIME_ZONE
    # map crontab day-of-week (Sun=0 or 7 .. Sat=6) to APScheduler day_of_week (Mon=0 .. Sun=6)
    week_map = {
        '*': '*',
        '7': '6',
        '0': '6',
        '1': '0',
        '2': '1',
        '3': '2',
        '4': '3',
        '5': '4',
        '6': '5',
    }

    def __init__(self):
        self.scheduler = BackgroundScheduler(timezone=self.timezone)
        self.scheduler.add_listener(
            self._handle_event, EVENT_SCHEDULER_SHUTDOWN | EVENT_JOB_ERROR
            | EVENT_JOB_MAX_INSTANCES | EVENT_JOB_EXECUTED)

    @classmethod
    def parse_trigger(cls, trigger, trigger_args):
        if trigger == 'interval':
            return IntervalTrigger(seconds=int(trigger_args),
                                   timezone=cls.timezone)
        elif trigger == 'date':
            return DateTrigger(run_date=trigger_args, timezone=cls.timezone)
        elif trigger == 'cron':
            args = json.loads(trigger_args) if not isinstance(
                trigger_args, dict) else trigger_args
            minute, hour, day, month, week = args['rule'].split()
            week = cls.week_map[week]
            return CronTrigger(minute=minute,
                               hour=hour,
                               day=day,
                               month=month,
                               day_of_week=week,
                               start_date=args['start'],
                               end_date=args['stop'])
        else:
            raise TypeError(f'unknown schedule policy: {trigger!r}')

    def _handle_event(self, event):
        close_old_connections()
        obj = SimpleLazyObject(
            lambda: Task.objects.filter(pk=event.job_id).first())
        if event.code == EVENT_SCHEDULER_SHUTDOWN:
            logger.info(f'EVENT_SCHEDULER_SHUTDOWN: {event}')
            Notify.make_notify('schedule', '1', 'Scheduler shut down',
                               'The scheduler shut down unexpectedly; you can file an issue on GitHub')
        elif event.code == EVENT_JOB_MAX_INSTANCES:
            logger.info(f'EVENT_JOB_MAX_INSTANCES: {event}')
            Notify.make_notify('schedule', '1', f'{obj.name} - max job instances reached',
                               'Usually the previous run has not finished yet; increase the interval or shorten the task')
        elif event.code == EVENT_JOB_ERROR:
            logger.info(
                f'EVENT_JOB_ERROR: job_id {event.job_id} exception: {event.exception}'
            )
            Notify.make_notify('schedule', '1', f'{obj.name} - execution error',
                               f'{event.exception}')
        elif event.code == EVENT_JOB_EXECUTED:
            if event.retval:
                score = 0
                for item in event.retval:
                    score += 1 if item[1] else 0
                history = History.objects.create(
                    task_id=event.job_id,
                    status=2
                    if score == len(event.retval) else 1 if score else 0,
                    run_time=human_datetime(event.scheduled_run_time),
                    output=json.dumps(event.retval))
                Task.objects.filter(pk=event.job_id).update(latest=history)
                if score != 0 and time.time() - counter.get(event.job_id,
                                                            0) > 3600:
                    counter[event.job_id] = time.time()
                    Notify.make_notify('schedule', '1', f'{obj.name} - execution failed',
                                       'See the task schedule page for failure details')

    def _init_builtin_jobs(self):
        self.scheduler.add_job(auto_clean_records, 'cron', hour=0, minute=0)
        self.scheduler.add_job(auto_clean_schedule_history,
                               'cron',
                               hour=0,
                               minute=0)

    def _init(self):
        self.scheduler.start()
        self._init_builtin_jobs()
        for task in Task.objects.filter(is_active=True):
            trigger = self.parse_trigger(task.trigger, task.trigger_args)
            self.scheduler.add_job(
                dispatch,
                trigger,
                id=str(task.id),
                args=(task.command, json.loads(task.targets)),
            )

    def run(self):
        rds_cli = get_redis_connection()
        self._init()
        rds_cli.delete(settings.SCHEDULE_KEY)
        logger.info('Running scheduler')
        while True:
            _, data = rds_cli.brpop(settings.SCHEDULE_KEY)
            task = AttrDict(json.loads(data))
            if task.action in ('add', 'modify'):
                trigger = self.parse_trigger(task.trigger, task.trigger_args)
                self.scheduler.add_job(dispatch,
                                       trigger,
                                       id=str(task.id),
                                       args=(task.command, task.targets),
                                       replace_existing=True)
            elif task.action == 'remove':
                job = self.scheduler.get_job(str(task.id))
                if job:
                    job.remove()
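
parse_trigger relies on week_map because crontab and APScheduler disagree on weekday numbering: crontab counts Sun=0 (or 7) through Sat=6, while APScheduler's day_of_week counts Mon=0 through Sun=6. A quick check of the conversion (CRON_TO_APS mirrors the week_map above):

CRON_TO_APS = {'*': '*', '0': '6', '7': '6', '1': '0', '2': '1',
               '3': '2', '4': '3', '5': '4', '6': '5'}

minute, hour, day, month, week = '30 8 * * 1'.split()  # crontab: Monday
print(CRON_TO_APS[week])  # '0' -> Monday in APScheduler terms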
Esempio n. 38
0
import os, time
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime


def build_stock_day_data():
    os.system("python 3_build_stock_day_data.py")


sched = BackgroundScheduler()
sched.add_job(build_stock_day_data,
              'interval',
              minutes=3,
              id='001',
              next_run_time=datetime.now(),  # fire once immediately, then every 3 minutes
              max_instances=1)

sched.start()

while True:
    print(sched.get_job('001'))

    time.sleep(60)
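
Since the main thread above only sleeps to keep the process alive, the same schedule can be expressed with a BlockingScheduler and no keep-alive loop; a minimal equivalent sketch:

from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()
sched.add_job(build_stock_day_data, 'interval', minutes=3, id='001', max_instances=1)
sched.start()  # blocks the calling thread; Ctrl+C to stop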