Example 1
import time
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def myjob():
	print(datetime.now())

if __name__ == '__main__':
	scheduler = BackgroundScheduler()
	scheduler.start()
	job = scheduler.add_job(myjob, 'interval', seconds=1, id='myjob')
	print(job)

	jobs = scheduler.get_jobs()
	print(jobs)

	try:
		time.sleep(5)
		print('pause job')
		scheduler.pause_job('myjob')
		time.sleep(5)
		print('resume job')
		scheduler.resume_job('myjob')

		print('reschedule job ...')
		# swap the interval trigger for a cron trigger (fires every 5 seconds)
		scheduler.reschedule_job('myjob', trigger='cron', second='*/5')
		time.sleep(10)
	except (KeyboardInterrupt, SystemExit):
		pass
	finally:
		scheduler.shutdown()
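The same pause/resume/reschedule cycle can also be driven through the Job handle that add_job returns, instead of going through the scheduler by id; a minimal sketch (reusing myjob from above):

# Sketch: controlling a job through its handle rather than its id.
scheduler = BackgroundScheduler()
scheduler.start()
job = scheduler.add_job(myjob, 'interval', seconds=1)

job.pause()                           # same effect as scheduler.pause_job(job.id)
job.resume()                          # same effect as scheduler.resume_job(job.id)
job.reschedule('cron', second='*/5')  # same effect as scheduler.reschedule_job(job.id, ...)
scheduler.shutdown()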
Example 2
def encode_compute():
    # NOTE: this scheduler is created and started on every request, so the
    # get_job() check below never sees jobs from earlier requests; a single
    # module-level scheduler avoids that.
    scheduler = BackgroundScheduler()
    scheduler.start()
    message = request.form['message']
    image = request.files['img']
    if not message or not image:
        return redirect(url_for('steganography_web.encode'))

    filename = 'website/static/images/' + str(
        session['uid']) + image.filename.split('.')[0] + '.png'
    image.save(filename)
    session['steganography_image'] = filename

    binary_string = encode_string(message) + encode_string(END_OF_ENCODE)
    image = encode_image(filename, binary_string)

    time = datetime.datetime.now() + datetime.timedelta(minutes=4)
    if scheduler.get_job(filename):
        scheduler.reschedule_job(job_id=filename,
                                 trigger='date',
                                 run_date=time)
        print(f'job rescheduled for {time}')
    else:
        scheduler.add_job(delete_file,
                          args=[filename],
                          trigger='date',
                          run_date=time,
                          id=filename)
        print(f'job scheduled for {time}')

    send_days_left_text()
    return redirect(url_for('steganography_web.encode', show='True'))
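Because encode_compute builds and starts a fresh BackgroundScheduler on every request, the get_job(filename) check can never find a job from an earlier request. With a single module-level scheduler, APScheduler's replace_existing flag collapses the add-or-reschedule branch into one call; a sketch, assuming a shared scheduler and the same delete_file helper:

# Sketch: one shared scheduler plus replace_existing instead of get_job()/reschedule_job().
run_at = datetime.datetime.now() + datetime.timedelta(minutes=4)
scheduler.add_job(delete_file,
                  args=[filename],
                  trigger='date',
                  run_date=run_at,
                  id=filename,
                  replace_existing=True)  # re-adding an existing id replaces the old job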
Example 3
def test8():
    """Run a job on a schedule, then modify it."""
    scheduler = BackgroundScheduler()
    job1 = scheduler.add_job(my_job,
                             'interval',
                             args=('123', ),
                             seconds=2,
                             id='my_job_id',
                             max_instances=2)
    # Run my_job every 2 seconds; args are the arguments passed to my_job; id is optional.
    print('starting scheduler')
    scheduler.start()
    time.sleep(2)
    print('adding another job')
    job = scheduler.add_job(tick, 'interval', seconds=2, id='id2')
    # add_job also accepts max_instances, which caps how many instances of this job may run concurrently.

    time.sleep(4)
    job.modify(
        max_instances=6, name='Alternate name'
    )  # If a previous run is still going when the next one is due, that next
    # run is treated as failed; max_instances sets how many instances of a
    # particular job may run at the same time.
    # scheduler.reschedule_job('my_job_id', trigger='cron', minute='*/5')  # every 5 minutes; see cron syntax
    print('changing the execution interval')
    scheduler.reschedule_job('id2', trigger='cron',
                             second='*/1')  # every second; see cron syntax
    time.sleep(3)
    print('ok')
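As the comments above describe, modify() changes attributes of the job itself while reschedule_job() swaps its trigger; the two are independent. A minimal sketch of the distinction, assuming a running scheduler with a job 'id2':

# modify_job changes job attributes; the trigger is left untouched.
scheduler.modify_job('id2', max_instances=6, name='Alternate name')

# reschedule_job replaces only the trigger; other attributes are left untouched.
scheduler.reschedule_job('id2', trigger='interval', seconds=5)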
Example 4
class Scheduler:
    def __init__(self):
        self.jobs = {}
        self.scheduler = BackgroundScheduler({
            'apscheduler.job_defaults.coalesce':
            'true',
        })

    def addJob(self, policy, job_function, job_name):
        # Check that a job with the same name has not already been registered
        if job_name in self.jobs:
            raise Exception('A job named "' + job_name +
                            '" has already been scheduled')
        # Get the period trigger to use
        trigger = self._getTrigger(policy)
        # Add the job to the scheduler
        j = self.scheduler.add_job(job_function,
                                   name=job_name,
                                   max_instances=1,
                                   trigger=trigger)
        getLogger(
            __name__).debug('Job "' + job_name +
                            '" has been added to scheduler, it has the id ' +
                            j.id + '. It is scheduled every ' + policy)
        # Store the job id so that we can update the job later if needed
        self.jobs[job_name] = j.id

    def rescheduleJob(self, policy, job_name):
        # Check that a job with job_name actually exists
        if job_name not in self.jobs:
            raise Exception(
                'Job named "' + job_name +
                '" can not be rescheduled because it is not registered in scheduler'
            )
        # Get the period trigger to use
        trigger = self._getTrigger(policy)
        # Reschedule the job with the new trigger
        getLogger(__name__).debug('Reschedule job "' + job_name +
                                  '" having id ' + self.jobs.get(job_name))
        self.scheduler.reschedule_job(self.jobs.get(job_name), trigger=trigger)

    def start(self):
        self.scheduler.start()
        getLogger(__name__).info('Start scheduler')

    def stop(self):
        self.scheduler.shutdown()
        getLogger(__name__).info('Stop scheduler')

    def _getTrigger(self, policy):
        # Parse a policy such as '30s', '5m', '2h' or '1d' into an IntervalTrigger
        comp = re.compile("^([0-9]+)([smhd])?$")
        match = comp.match(policy)
        if match is None:
            raise Exception(
                "The periodicity of your task is not well defined.")
        value, unit = match.groups()
        # apply the unit suffix (default: seconds)
        multiplier = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[unit or 's']
        return IntervalTrigger(seconds=int(value) * multiplier)
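Usage of the class above might look like the following; collect_metrics is a hypothetical callable:

# Hypothetical usage of the policy-based Scheduler above.
def collect_metrics():
    print('collecting...')

sched = Scheduler()
sched.addJob('30s', collect_metrics, 'metrics')  # every 30 seconds
sched.start()
sched.rescheduleJob('5m', 'metrics')             # now every 5 minutes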
Example 5
class ApsScheduler(Scheduler):
    def __init__(self):
        self._scheduler = BackgroundScheduler()

    def start(self):
        self._scheduler.add_jobstore(DjangoJobStore(), 'default')
        self._scheduler.start()

    def add_job(self, func: Callable, func_kwargs: dict,
                cron_schedule: CronSchedule) -> str:
        job = self._scheduler.add_job(func=func,
                                      kwargs=func_kwargs,
                                      trigger='cron',
                                      start_date=cron_schedule.start_date,
                                      end_date=cron_schedule.end_date,
                                      year=cron_schedule.year,
                                      month=cron_schedule.month,
                                      day=cron_schedule.day,
                                      week=cron_schedule.week,
                                      day_of_week=cron_schedule.day_of_week,
                                      second=cron_schedule.second,
                                      minute=cron_schedule.minute,
                                      hour=cron_schedule.hour)

        return job.id

    def modify(self, cron_schedule: CronSchedule):
        for job_id in self._get_all_affected_job_ids(cron_schedule.pk):
            self._scheduler.reschedule_job(
                job_id,
                trigger='cron',
                start_date=cron_schedule.start_date,
                end_date=cron_schedule.end_date,
                year=cron_schedule.year,
                month=cron_schedule.month,
                day=cron_schedule.day,
                week=cron_schedule.week,
                day_of_week=cron_schedule.day_of_week,
                second=cron_schedule.second,
                minute=cron_schedule.minute,
                hour=cron_schedule.hour)

    def delete(self, cron_schedule_id: int):
        for job_id in self._get_all_affected_job_ids(cron_schedule_id):
            self._scheduler.remove_job(job_id)

    def delete_job(self, job_id):
        self._scheduler.remove_job(job_id)

    @staticmethod
    def _get_all_affected_job_ids(cron_schedule_id):
        objs = Timelapse.objects.filter(schedule_id=cron_schedule_id)
        for obj in objs:
            assert isinstance(obj, Timelapse)
            yield obj.schedule_job_id
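Rather than spelling the cron fields out twice, the trigger can be built once from the CronSchedule and reused for both calls, since reschedule_job accepts a trigger instance as well as an alias (Example 4 passes an IntervalTrigger instance the same way); a sketch, with scheduler standing in for self._scheduler:

from apscheduler.triggers.cron import CronTrigger

# Build the trigger once from the CronSchedule fields...
trigger = CronTrigger(start_date=cron_schedule.start_date,
                      end_date=cron_schedule.end_date,
                      hour=cron_schedule.hour,
                      minute=cron_schedule.minute)

# ...then reuse it when adding and when rescheduling.
job = scheduler.add_job(func, kwargs=func_kwargs, trigger=trigger)
scheduler.reschedule_job(job.id, trigger=trigger)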
Example 6
def new_season(idS):
    # three steps:
    # close the previous season
    # start the new season
    # schedule the next season one year from now
    Dday, Dmonth, Dyear, dict_Dates = parse_dates()
    idS_old = 0

    if idS == 1:
        idS_old = 4
    else:
        idS_old = idS - 1

    print("Nouvelle saison en cours de lancement...")
    es, nb_invest, bank_tot = end_season()
    if es:

        # Compute the multiplier
        try:
            m_x = bank_tot / nb_invest
            print("m_x : {}".format(m_x))
        except ZeroDivisionError:
            m_x = 10

        m_n = dict_Dates["mult"]
        print("m_n : {}".format(m_n))
        if m_n == 0:
            m_n = 2

        try:
            mult = math.log2(m_n) * math.log10(m_x)
            print("mult : {}".format(mult))
        except ValueError:
            mult = 1
        # Write the new year and the new multiplier to the file
        dict_Dates[idS_old] = dict_Dates[idS_old][:6] + str(Dyear[idS_old] + 1)
        dict_Dates["mult"] = mult
        dict_Dates["total"] = dict_Dates["total"] + 1
        path = "core/saisons.json"
        with open(path, 'w', encoding='utf-8') as json_file:
            json.dump(dict_Dates, json_file, indent=4)

        # Reschedule the season job to its new date
        # NOTE: a freshly constructed BackgroundScheduler holds no jobs; this
        # lookup only works against a shared scheduler that already has the
        # 'S<id>' jobs registered.
        scheduler = BackgroundScheduler()

        scheduler.reschedule_job('S' + str(idS_old),
                                 trigger='date',
                                 run_date=date(int(dict_Dates[idS_old][6:]),
                                               Dmonth[int(idS_old)],
                                               Dday[int(idS_old)]))

    else:
        print("\n\n\n ...Impossible de mettre fin à la précédente saison.")
Example 8
class ManageSchedule(ManageObject, Device):

    def __init__(self):
        """Constructor"""
        self._timezone = Device.config['timezone']
        self._connexion_mode = Device.config['connexion_mode']
        self._scheduler = ''
        self._DateToCompare = ''
        self._Now = ''
        self.DateFileModification = ''

    def _Get_Timezone(self):
        return self._timezone

    def _Set_Scheduler_With_Timezone(self, _timezone):
        # replace_existing applies to add_job(), not to the scheduler constructor
        self._scheduler = BackgroundScheduler(timezone=_timezone)

    def _Set_DateToCompare(self):
        self._DateToCompare = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    def _Get_DateFileModification(self, file):
        return datetime.datetime.fromtimestamp(os.stat(file).st_mtime).strftime('%Y-%m-%d %H:%M:%S')

    def LaunchSchedule(self):
        self._Set_Scheduler_With_Timezone(self._timezone)
        self.DefaultJob()
        self._scheduler.start()

    def updateSchedule(self, _file, _state, _type, id_sched):
        if self._DateToCompare == '':
            self.UpdateObject(_file, _state, _type)
            self._Set_DateToCompare()
        if self._DateToCompare > self._Get_DateFileModification(_file):
            print('the reference date is newer; nothing to do')
        else:
            print('the reference date is older; updating')
            # slow the job down to once a minute while updating...
            self._scheduler.reschedule_job(id_sched, trigger='cron', minute='*/1')
            self.UpdateObject(_file, _state, _type)
            self._Set_DateToCompare()
            # ...then restore the every-second schedule
            self._scheduler.reschedule_job(id_sched, trigger='cron', second='*')

    def DefaultJob(self):
        self._scheduler.add_job(self.updateSchedule, args=['main.json', 'enable', 'component', 'sched_component'], trigger='cron', second='*', id='sched_component')
        self._scheduler.add_job(self.updateSchedule, args=['main.json', 'enable', 'plugins', 'sched_plugin'], trigger='cron', second='*', id='sched_plugin')
Example 9
def mandelbrot():
    # NOTE: as in Example 2, a scheduler created per request means the
    # get_job() check below never finds jobs from earlier requests.
    scheduler = BackgroundScheduler()
    scheduler.start()
    if 'uid' not in session:
        session['uid'] = uuid.uuid4()

    if 'r' in request.values and 'i' in request.values:
        real = float(request.values['r'])
        imaginary = float(request.values['i'])

        image = create(real, imaginary)
        filename = 'website/static/images/' + str(
            session['uid']) + 'fractal.png'
        # image.save('static/images/fractal.png')
        image.save(filename)

        time = datetime.datetime.now() + datetime.timedelta(seconds=10)
        if scheduler.get_job(filename):
            scheduler.reschedule_job(job_id=filename,
                                     trigger='date',
                                     run_date=time)
            print(f'job rescheduled for {time}')
        else:
            scheduler.add_job(delete_file,
                              args=[filename],
                              trigger='date',
                              run_date=time,
                              id=filename)
            print(f'job scheduled for {time}')

        return render_template("mandelbrot.html",
                               image='/' + filename[8:] + '?' +
                               str(rand.randint(1000)),
                               real=real,
                               imag=imaginary)

    else:
        return render_template("mandelbrot.html",
                               image='/static/images/defaultFractal.png',
                               real=0,
                               imag=0)
Example 11
class Scheduler:
    def startup_scheduler(self):
        self.scheduler = BackgroundScheduler()
        logger.info("----INIT SCHEDULE OBJECT-----")
        self.scheduler.start()

        self.scheduler.add_job(auto_backup_job,
                               trigger="cron",
                               hour="3",
                               max_instances=1)
        settings = SiteSettings.get_site_settings()
        time = cron_parser(settings.webhooks.webhookTime)

        self.webhook = self.scheduler.add_job(
            post_webhooks,
            trigger="cron",
            name="webhooks",
            hour=time.hours,
            minute=time.minutes,
            max_instances=1,
        )

        self.scheduler.print_jobs()  # print_jobs() writes to stdout and returns None

    def reschedule_webhooks(self):
        """
        Reads the site settings database entry to reschedule the webhooks task.
        Called after each POST to the webhooks endpoint.
        """
        settings = SiteSettings.get_site_settings()
        time = cron_parser(settings.webhooks.webhookTime)

        self.scheduler.reschedule_job(
            self.webhook.id,
            trigger="cron",
            hour=time.hours,
            minute=time.minutes,
        )

        self.scheduler.print_jobs()  # print_jobs() writes to stdout and returns None
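Since print_jobs() writes to stdout and returns None, a log-friendly alternative is to iterate get_jobs(); a sketch that drops into the class above:

# Sketch: log job state explicitly instead of logging print_jobs()'s None return.
for job in self.scheduler.get_jobs():
    logger.info(f"{job.id}: next run at {job.next_run_time}")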
Example 12
def add_scheduler():
    jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')}
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    job_defaults=job_defaults,
                                    timezone=utc)

    scheduler.start()

    scheduler.reschedule_job(job_id='covid_job',
                             trigger='interval',
                             minutes=app.config['REQUEST_INTERVAL'])
    scheduler.reschedule_job(job_id='currencies_job',
                             trigger='interval',
                             minutes=app.config['REQUEST_INTERVAL'])

    scheduler.print_jobs()
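reschedule_job raises JobLookupError when the persisted ids are absent (for instance on a first run against an empty jobs.sqlite), so a guarded variant may be safer; a sketch reusing the names above:

from apscheduler.jobstores.base import JobLookupError

for job_id in ('covid_job', 'currencies_job'):
    try:
        scheduler.reschedule_job(job_id=job_id,
                                 trigger='interval',
                                 minutes=app.config['REQUEST_INTERVAL'])
    except JobLookupError:
        print(f'{job_id} not found in the jobstore; skipping')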
Example 13
def update_game_thread():
    log.info("Starting the scheduled game-update thread...")
    pre_update_time = DEFAULT_GAME_UPDATE_TIME
    update_job_id = "update_game"

    parse_time(pre_update_time)  # parse the default time string once up front

    # start the background scheduler
    scheduler = BackgroundScheduler(logger=log, timezone="Asia/Shanghai")
    scheduler.add_job(update_game,
                      trigger="cron",
                      id=update_job_id,
                      **parse_time(pre_update_time))
    scheduler.start()

    SLEEP_TIME = 30
    while True:
        try:
            # fetch the currently configured game update time
            time_str = DeviceGameService.get_game_update_time(cache_client)
            if time_str == pre_update_time:
                log.info("current scheduled game update time: {}".format(time_str))
            else:
                log.info("game update time changed: pre = {} cur = {}".format(
                    pre_update_time, time_str))
                pre_update_time = time_str
                scheduler.reschedule_job(update_job_id,
                                         trigger="cron",
                                         **parse_time(time_str))
        except Exception as e:
            log.error("game update scheduling cycle failed:")
            log.exception(e)

        time.sleep(SLEEP_TIME)
Example 14
class Scheduler:
    def __init__(self):
        self.sched = BackgroundScheduler()
        self.sched.start()
        self.job_id = ''

    def __del__(self):
        self.sched.shutdown()

    def shutdown(self):
        self.sched.shutdown()

    def stop(self, job_id):
        try:
            self.sched.remove_job(job_id)
        except JobLookupError as error:
            print(f"fail to stop Scheduler : {error}.")
            return

    def restart(self, job_id, run_time):  # run_time format: %H:%M
        # NOTE: rebuilding the trigger from hour/minute only resets day_of_week to every day
        hour, minute = map(int, run_time.split(':'))
        self.sched.reschedule_job(job_id,
                                  trigger='cron',
                                  hour=hour,
                                  minute=minute)

    def add_job(self, func_name, func_param, run_dayOfWeek, run_time,
                job_name):  #run_time format : %H:%M
        hour, minute = map(int, run_time.split(':'))
        self.sched.add_job(func_name,
                           args=func_param,
                           trigger='cron',
                           day_of_week=run_dayOfWeek,
                           hour=hour,
                           minute=minute,
                           id=job_name,
                           name=job_name)
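Hypothetical usage of the wrapper above (send_report is a placeholder). Note that restart() rebuilds the cron trigger from hour and minute only, so any day_of_week restriction is reset to every day:

# Hypothetical usage of the Scheduler wrapper above.
def send_report(channel):
    print(f'report sent to {channel}')

s = Scheduler()
s.add_job(send_report, ['#general'], 'mon-fri', '09:30', 'daily_report')
s.restart('daily_report', '10:00')  # now fires at 10:00 every day
s.stop('daily_report')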
Example 15
class JobScheduler(ISingleton):
    @inject
    def __init__(
        self,
        database_config: DatabaseConfig,
        database_session_manager: DatabaseSessionManager,
    ):
        self.database_session_manager: DatabaseSessionManager = database_session_manager
        self.database_config: DatabaseConfig = database_config
        self.scheduler: BackgroundScheduler = None

    def run(self):
        self.run_scheduler()
        print("job_process started")

    def run_scheduler(self):
        jobstores = {
            'default':
            SQLAlchemyJobStore(url=self.database_config.connection_string,
                               tablename='ApSchedulerJobsTable',
                               engine=self.database_session_manager.engine,
                               metadata=IocManager.Base.metadata,
                               tableschema='Aps')
        }
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        job_defaults = {'coalesce': False, 'max_instances': 5}
        self.scheduler = BackgroundScheduler(daemon=True,
                                             jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults)
        JobSchedulerEvent.job_scheduler_type = JobScheduler
        self.scheduler.add_listener(JobSchedulerEvent.listener_finish,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.scheduler.add_listener(JobSchedulerEvent.listener_job_added,
                                    EVENT_JOB_ADDED)
        self.scheduler.add_listener(JobSchedulerEvent.listener_job_submitted,
                                    EVENT_JOB_SUBMITTED)
        self.scheduler.add_listener(JobSchedulerEvent.listener_job_removed,
                                    EVENT_JOB_REMOVED)
        self.scheduler.add_listener(
            JobSchedulerEvent.listener_all_jobs_removed,
            EVENT_ALL_JOBS_REMOVED)
        self.scheduler.add_listener(
            JobSchedulerEvent.listener_job_others,
            EVENT_JOB_MODIFIED | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES)
        self.scheduler.add_listener(
            JobSchedulerEvent.listener_scheduler_other_events,
            EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN
            | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
            | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED
            | EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED)
        self.scheduler.start()

        print('To clear the alarms, delete the example.sqlite file.')
        print('Press Ctrl+{0} to exit'.format('Break' if os.name ==
                                              'nt' else 'C'))

    def add_job_with_date(self,
                          job_function,
                          run_date,
                          args=None,
                          kwargs=None) -> Job:
        # if run_date is None:
        #     run_date = datetime.now() + timedelta(seconds=10)
        job: Job = self.scheduler.add_job(job_function,
                                          'date',
                                          run_date=run_date,
                                          misfire_grace_time=30000,
                                          args=args,
                                          kwargs=kwargs)
        return job

    def add_job_with_cron(self,
                          job_function,
                          cron: CronTrigger,
                          args=None,
                          kwargs=None) -> Job:
        # if cron.start_date is not None and cron.start_date < datetime.now().astimezone(get_localzone()):
        #     cron.start_date = None
        # if cron.end_date is not None and cron.end_date < datetime.now().astimezone(get_localzone()):
        #     cron.end_date = None
        job: Job = self.scheduler.add_job(job_function,
                                          cron,
                                          misfire_grace_time=15,
                                          args=args,
                                          kwargs=kwargs)
        return job

    def modify_job(self, job_id, jobstore=None, **changes):
        return self.scheduler.modify_job(job_id, jobstore, **changes)

    def reschedule_job(self,
                       job_id,
                       jobstore=None,
                       trigger=None,
                       **trigger_args):
        return self.scheduler.reschedule_job(job_id, jobstore, trigger,
                                             **trigger_args)

    def pause_job(self, job_id, jobstore=None):
        return self.scheduler.pause_job(job_id, jobstore)

    def resume_job(self, job_id, jobstore=None):
        return self.scheduler.resume_job(job_id, jobstore)

    def remove_job(self, job_id, jobstore=None):
        self.scheduler.remove_job(job_id, jobstore)

    def get_job(self, job_id):
        return self.scheduler.get_job(job_id)

    def get_jobs(self, jobstore=None):
        return self.scheduler.get_jobs(jobstore)
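A listener receives a single event object; for job events the useful attributes include job_id and, on failures, exception. A minimal sketch of what one of the callbacks assumed above might look like:

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

# Minimal sketch of a finish listener such as JobSchedulerEvent.listener_finish.
def listener_finish(event):
    if getattr(event, 'exception', None):
        print(f'job {event.job_id} failed: {event.exception}')
    else:
        print(f'job {event.job_id} finished')

# scheduler.add_listener(listener_finish, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)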
Example 16
class SchedulerManager(object):

    # __metaclass__ = ABCMeta
    global _mongoclient

    def __init__(self):
        self.jobstores = {
            'mongo':
            MongoDBJobStore(collection='job1',
                            database='saasjob',
                            client=_mongoclient),
            'default':
            MemoryJobStore()
        }
        self.executors = {
            'default': ThreadPoolExecutor(1),
            'processpool': ProcessPoolExecutor(1)
        }
        self.job_defaults = {
            'coalesce': False,
            'misfire_grace_time': 1,
            'max_instances': 1
        }
        self._sched = BackgroundScheduler(jobstores=self.jobstores,
                                          executors=self.executors,
                                          job_defaults=self.job_defaults)
        # listen for job-submitted events
        self._sched.add_listener(self.when_job_submitted, EVENT_JOB_SUBMITTED)
        # listen for job-executed events
        self._sched.add_listener(self.when_job_executed, EVENT_JOB_EXECUTED)
        # listen for job-error events
        self._sched.add_listener(self.when_job_crashed, EVENT_JOB_ERROR)
        self._jobs = {}
        self._jobhandlers = {}  # format, key: jobid,  value: jobhandler
        self._jobs_key = ["name", "func", "args", "kwargs"]
        self.start()

    def cmd_valid(self, cmd):
        return cmd.strip().startswith("python")

    def get_job_trigger(self, _job):
        # ('trigger', <CronTrigger (second='4', timezone='Asia/Shanghai')>)
        _trigger = self._get_job_attr(_job, "trigger")
        # options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
        if _trigger:
            return dict((f.name, str(f)) for f in _trigger.fields
                        if not f.is_default)
        else:
            return {}

    # Get a job attribute
    def _get_job_attr(self, _job, attr):
        try:
            return getattr(_job, attr)
        except AttributeError:
            import traceback
            traceback.print_exc()
            return None

    def when_job_submitted(self, event):
        try:
            job_id = event.job_id
            # lazily create a handler for jobs not yet tracked here
            if job_id not in self._jobhandlers and job_id in self._jobs:
                self._jobhandlers.setdefault(job_id,
                                             JobHandler(self._jobs[job_id]))
            jobhandler = self._jobhandlers[event.job_id]
            jobhandler.when_job_submitted()
            print("%s submitted at %s" %
                  (event.job_id,
                   time.strftime("%Y-%m-%d %H:%M:%S",
                                 time.localtime(time.time()))))
        except Exception:
            import traceback
            traceback.print_exc()

    def when_job_executed(self, event):
        try:
            job_id = event.job_id
            if job_id not in self._jobhandlers:
                self._jobhandlers.setdefault(job_id,
                                             JobHandler(self._jobs[job_id]))
            jobhandler = self._jobhandlers[event.job_id]
            jobhandler.when_job_executed()
            print("%s executed at %s" %
                  (event.job_id,
                   time.strftime("%Y-%m-%d %H:%M:%S",
                                 time.localtime(time.time()))))
        except Exception:
            import traceback
            traceback.print_exc()

    def when_job_crashed(self, event):
        try:
            if event.exception:
                job_id = event.job_id
                if job_id not in self._jobhandlers:
                    self._jobhandlers.setdefault(
                        job_id, JobHandler(self._jobs[job_id]))
                jobhandler = self._jobhandlers[event.job_id]
                jobhandler.when_job_crashed()
                print("%s crashed at %s" %
                      (event.job_id,
                       time.strftime("%Y-%m-%d %H:%M:%S",
                                     time.localtime(time.time()))))
        except Exception:
            import traceback
            traceback.print_exc()

    # Add a recurring job, crontab-style
    def addCron(self, cmd, **params):
        try:
            create_jobid = uuid.uuid4().hex
            if not self.cmd_valid(cmd):
                return {"errinfo": "wrong cmd"}
            jobcmdobj = JobCmd(cmd)
            data = params.get("data", {})
            jobcmdobj.set_jobid(create_jobid)
            s = params.get("second",
                           None) if params.get("second", None) != "*" else None
            m = params.get("minute",
                           None) if params.get("minute", None) != "*" else None
            h = params.get("hour",
                           None) if params.get("hour", None) != "*" else None
            d = params.get("day",
                           None) if params.get("day", None) != "*" else None
            dw = params.get(
                "day_of_week",
                None) if params.get("day_of_week", None) != "*" else None
            mnth = params.get(
                "month", None) if params.get("month", None) != "*" else None
            y = params.get("year",
                           None) if params.get("year", None) != "*" else None
            _job = self._sched.add_job(jobcmdcallable,
                                       'cron',
                                       year=y,
                                       month=mnth,
                                       day=d,
                                       day_of_week=dw,
                                       hour=h,
                                       minute=m,
                                       second=s,
                                       args=[jobcmdobj, data],
                                       executor="processpool",
                                       jobstore="mongo",
                                       id=create_jobid)
            self._jobhandlers.setdefault(create_jobid, JobHandler(_job))
            # remember the job handle
            return {"job_id": create_jobid}
        except Exception:
            import traceback
            traceback.print_exc()
            print(cmd, params)
            return False

    # Modify job attributes
    def modifyJobAttr(self, job_id, **changes):
        try:
            _job = self._sched.modify_job(job_id=job_id, **changes)
            self._jobs[job_id] = _job
            if job_id in self._jobhandlers:
                self._jobhandlers[job_id].job = _job
            else:
                self._jobhandlers.setdefault(job_id, JobHandler(_job))
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id, changes)
            return False

    def modifyJobData(self, job_id, data):
        try:
            args = self._get_job_attr(self._jobhandlers[job_id].job, "args")
            # args_copy = [item for item in args]
            for key in data:
                args[1][key] = data[key]
            _job = self._sched.modify_job(job_id, args=args)
            self._jobs[job_id] = _job
            if job_id in self._jobhandlers:
                self._jobhandlers[job_id].job = _job
            else:
                self._jobhandlers.setdefault(job_id, JobHandler(_job))
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id, data)
            return False

    # Modify the schedule, crontab-style
    def modifyJobFreq(self, job_id, cronargs):
        try:
            _job = self._sched.reschedule_job(job_id,
                                              trigger='cron',
                                              **cronargs)
            self._jobs[job_id] = _job
            if job_id in self._jobhandlers:
                self._jobhandlers[job_id].job = _job
            else:
                self._jobhandlers.setdefault(job_id, JobHandler(_job))
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id, cronargs)
            return False

    # Remove a job
    def removeFromCron(self, job_id):
        try:
            self._sched.remove_job(job_id)
            if job_id in self._jobhandlers:
                self._jobhandlers.pop(job_id)
            if job_id in self._jobs:
                self._jobs.pop(job_id)
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id)
            return False

    def job_exists(self, job_id):
        if job_id in self._jobhandlers or job_id in self._jobs:
            if job_id not in self._jobhandlers and job_id in self._jobs:
                self._jobhandlers[job_id] = JobHandler(self._jobs[job_id])
            elif job_id in self._jobhandlers and job_id not in self._jobs:
                self._jobs[job_id] = self._jobhandlers[job_id].job
            return True
        else:
            return False

    # Look up job info by job id
    def findCronJob(self, job_ids):
        result = []
        _keys = [
            "cmd", "create_stamp", "is_running", "start_stamp", "hope_runtime",
            "is_success", "is_pause", "status", "name", "desc", "allowmodify"
        ]
        for job_id in job_ids:
            print("job_exists", self.job_exists(job_id))
            if self.job_exists(job_id):
                _jobhander = self._jobhandlers[job_id]
                job_info = _jobhander.jobhandlerattr
                cron_trigger = {}
                # cron_trigger = self.get_cron_trigger(_jobhander.job)
                tmp = {}
                tmp["job_id"] = job_id
                if job_info["is_running"]:
                    execute_time = time.time() - job_info["start_stamp"]
                    tmp["running_time"] = round(execute_time, 3)
                else:
                    tmp["running_time"] = round(job_info["hope_runtime"], 3)
                for key in _keys:
                    v = job_info.get(key, None)
                    if key == "is_running":
                        tmp["finished"] = False if job_info[
                            "is_running"] else True
                    else:
                        tmp[key] = v
                if tmp["finished"]:
                    tmp["completed_per"] = 1.0
                else:
                    tmp["completed_per"] = round(
                        tmp["running_time"] /
                        max([tmp["running_time"], tmp["hope_runtime"]]), 3)
                # del tmp["hope_runtime"]
                # del tmp["is_success"]
                # del tmp["is_pause"]
                tmp.pop("hope_runtime")
                tmp.pop("is_success")
                tmp.pop("is_pause")
                _result = dict(tmp, **cron_trigger)
                print("_result", _result)
                if _result["status"] == 3:
                    _result["completed_per"] = 0
                    _result["running_time"] = 0
                    _result["start_stamp"] = None
                result.append(_result)
            else:
                result.append({"job_id": job_id, "errinfo": "no exists"})
        return result

    def getAllJobInfo(self):
        try:
            result = self.findCronJob(
                set(self._jobhandlers.keys()) | set(self._jobs.keys()))
            return result
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def start_addition(self):
        for _job in self._sched.get_jobs():
            job_id = self._get_job_attr(_job, "id")
            self._jobs.setdefault(job_id, _job)

    def start(self):
        try:
            self._sched.start()
            self._sched.pause()
            self.start_addition()
            self._sched.resume()
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def stop(self, iswait=True):
        try:
            self._sched.shutdown(wait=iswait)
            self._jobhandlers.clear()
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def pause_job(self, job_id):
        try:
            self._sched.pause_job(job_id=job_id)
            self._jobhandlers[job_id].ispause = True
            self._jobhandlers[job_id].status = 3
            self._jobhandlers[job_id].isrunning = False
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def resume_job(self, job_id):
        try:
            self._sched.resume_job(job_id=job_id)
            self._jobhandlers[job_id].ispause = False
            self._jobhandlers[job_id].status = 1
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False
Example 17
class DataCollector:
    def __init__(self):
        self.logger = get_logger('data-collector')

        self.scheduler = BackgroundScheduler(timezone="Asia/Seoul")
        self.scheduler.start()
        self.templates = dict()

        self.__global_store = dict()

        self.job_broker = Celery('routine-jobs',
                                 broker=BROKER_URL,
                                 backend=CELERY_RESULT_BACKEND)

    # =========================================================================
    def add_job_schedules(self, schedule_templates: list):
        for schedule_template in schedule_templates:
            schedule_name, trigger = operator.itemgetter(
                'schedule_name', 'trigger')(schedule_template)

            # schedule name can't be duplicated.
            schedule_names = [
                x['schedule_name'] for x in self.get_schedule_jobs()
            ]
            if schedule_name in schedule_names:
                msg = f'The schedule name \'{schedule_name}\' is already assigned.'
                self.logger.error(msg)
                raise ExceptionScheduleReduplicated(msg)

            self._add_job_schedule(schedule_name,
                                   trigger_type=trigger['type'],
                                   trigger_setting=trigger['setting'])

            # store the schedule template
            self.templates[schedule_name] = schedule_template
            self.__global_store[schedule_name] = {'_gv': dict()}

    # =========================================================================
    def _add_job_schedule(self, key, trigger_type, trigger_setting):
        if trigger_type == 'crontab' and 'crontab' in trigger_setting:
            crontab = self.crontab_add_second(trigger_setting['crontab'])
            trigger_type = 'cron'
            trigger_setting = {**trigger_setting, **crontab}
            del trigger_setting['crontab']

        arguments = dict(func=self.request_data,
                         args=(key, ),
                         id=key,
                         trigger=trigger_type)
        arguments = {**arguments, **trigger_setting}

        self.scheduler.pause()
        try:
            self.scheduler.add_job(**arguments)
        finally:
            self.scheduler.resume()

    # =========================================================================
    def remove_job_schedule(self, schedule_name: str):
        self.get_schedule_job(schedule_name)
        self.scheduler.remove_job(schedule_name)
        try:
            del self.templates[schedule_name]
            del self.__global_store[schedule_name]
        except KeyError:
            # it should be failing to collect data. such as not connecting.
            pass

        return

    # =========================================================================
    def modify_job_schedule(self, schedule_name, trigger_type, trigger_args):
        if trigger_type == 'crontab' and 'crontab' in trigger_args:
            crontab = self.crontab_add_second(trigger_args['crontab'])
            trigger = 'cron'

            setting = {**trigger_args, **crontab}
            del setting['crontab']
        else:
            trigger = trigger_type
            setting = trigger_args

        job = self.scheduler.get_job(schedule_name)
        job.reschedule(trigger, **setting)
        self.templates[schedule_name]['trigger'] = dict(type=trigger_type,
                                                        setting=trigger_args)

    # =========================================================================
    @staticmethod
    def get_python_module(code, name):
        module = types.ModuleType(name)
        exec(code, module.__dict__)
        return module

    # =========================================================================
    @staticmethod
    def insert_number_each_line(data: str):
        result = list()
        data = data.split('\n')
        for (number, line) in enumerate(data):
            result.append(f'{number+1:04} {line}')
        return '\n'.join(result)

    # =========================================================================
    @staticmethod
    def filter_dict(dict_to_filter, thing_with_kwargs):
        sig = inspect.signature(thing_with_kwargs)
        filter_keys = [
            param.name for param in sig.parameters.values()
            if param.kind == param.POSITIONAL_OR_KEYWORD
        ]
        filtered_dict = {
            filter_key: dict_to_filter[filter_key]
            for filter_key in filter_keys
        }
        return filtered_dict

    # =========================================================================
    def _source(self, name, setting):
        source_type, code, arguments = operator.itemgetter(
            'type', 'code', 'arguments')(setting)
        module = DataCollector.get_python_module(code, name)
        try:
            _gv = self.__global_store[name]
            arguments = {**arguments, **_gv}
            filtered_arguments = DataCollector.filter_dict(
                arguments, module.main)
            data = module.main(**filtered_arguments)
        except Exception as e:
            code = DataCollector.insert_number_each_line(code)
            self.logger.error(f'{e}\ncode: \n{code}')
            raise
        return data

    # =========================================================================
    def request_data(self, schedule_name):
        if schedule_name not in self.templates:
            msg = f'The template "{schedule_name}" ' \
                  f'is not in the main template store'
            self.logger.error(msg)
            raise KeyError(msg)
        schedule = self.templates[schedule_name]

        # checking use flag
        if not schedule['use']:
            self.logger.info(f'{schedule_name} is disabled.')
            return

        # source
        data = self._source(schedule_name, schedule['source'])
        if data is None:
            message = f'[{schedule_name}] The user function returned None.'
            self.logger.warning(message)

        # works
        # calling function for each works with arguments via celery
        for work in schedule['works']:
            work_type, arguments = operator.itemgetter('type',
                                                       'arguments')(work)
            self.job_broker.send_task(work_type,
                                      args=(data, ),
                                      kwargs=arguments)

        # emit the event to the event collector
        data = json.dumps(data)
        event = {
            'name': schedule_name,
            'event': {
                'type': 'data-collector',
                'schedule_name': schedule_name
            },
            'data': data
        }
        try:
            self.emit_event(schedule_name, event)
        except (urllib3.exceptions.MaxRetryError,
                requests.exceptions.ConnectionError):
            self.logger.error('Connection Error: Failed to emit events.')
        except Exception:
            import traceback
            traceback.print_exc()
        return

    # =========================================================================
    def emit_event(self, name: str, event: dict):
        with requests.Session() as s:
            api = EVENT_COLLECTOR_URL + '/api/v1/events/emit'
            response = s.post(api, json=event)
            if response.status_code != 200:
                raise Exception(f'code: {response.status_code}\n'
                                f'messages: [{name}] - {response.reason}')
            data = json.loads(response.text)
            self.logger.info(f'[{name}] emitted an event.')


    # =========================================================================
    def get_schedule_jobs(self):
        jobs = self.scheduler.get_jobs()
        if not jobs:
            return jobs
        result = list()
        for job in jobs:
            schedule_name = job.id
            next_run_time = job.next_run_time
            template_data = self.templates[schedule_name]
            template_data['next_run_time'] = next_run_time
            result.append(template_data)
        return result
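Where the trigger setting already carries a standard five-field crontab string, CronTrigger.from_crontab can stand in for the hand-rolled crontab conversion; a sketch reusing the names from the example:

from apscheduler.triggers.cron import CronTrigger

# '*/5 * * * *' maps to minute='*/5'; seconds are not part of standard crontab syntax.
trigger = CronTrigger.from_crontab('*/5 * * * *')
self.scheduler.reschedule_job(schedule_name, trigger=trigger)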
Example 18
class config:
    __white = (255, 255, 255)
    __red = (255, 0, 0)
    __color = (0, 255, 255)
    __colorInv = __red
    __colorFile = "/home/pi/SmartMirror/data/configFiles/colorFile.txt"
    __brightnessFile = "/home/pi/SmartMirror/data/configFiles/brightnessFile.txt"
    __fontFile = "/home/pi/SmartMirror/data/configFiles/fontFile.txt"
    __speedFile = "/home/pi/SmartMirror/data/configFiles/speedFile.txt"
    __viewFile = "/home/pi/SmartMirror/data/configFiles/viewFile.txt"
    __sleepFile = "/home/pi/SmartMirror/data/configFiles/sleepTimer.txt"

    __fileIO = fileIO()
    __defFontName = "Serif"

    def __init__(self):
        self.__vcgencmd = Vcgencmd()
        self.__pwm = pigpio.pi()
        self.__valChange = ["None", "None", "None"]
        self.__fonts = [0] * 8
        self.__cSize = 0

        self.__scheduler = BackgroundScheduler()
        self.__scheduler.start()

        self.__sleepF1 = True
        self.__sleepF2 = True

        self.__stayOffFlag = False

    def getStayOffFlag(self):
        return self.__stayOffFlag

    def setStayOffFlag(self, state):
        self.__stayOffFlag = state

    def restartDim(self):
        sleep = self.__fileIO.simpleRead(self.__sleepFile, multiLine=True)
        try:
            self.__scheduler.reschedule_job("dim",
                                            trigger='interval',
                                            minutes=int(sleep[2]))
        except Exception:
            print(datetime.now().strftime("%H:%M:%S"),
                  "IN config::restartDim: cannot reschedule job!")
            return -1

    def setColor(self):
        color, colorInv = self.__fileIO.simpleRead(self.__colorFile,
                                                   separator=":")
        self.__color = [
            int(color[i:i + 2], 16) for i in range(1,
                                                   len(color) - 1, 2)
        ]
        self.__colorInv = [
            int(colorInv[i:i + 2], 16) for i in range(1,
                                                      len(colorInv) - 1, 2)
        ]
        if (self.__color == self.__white):
            self.__colorInv = self.__red

        return self.__color, self.__colorInv

    def setSleepTime(self):
        sleep = self.__fileIO.simpleRead(self.__sleepFile, multiLine=True)
        if (len(sleep) > 4):
            if (self.__valChange[0] != sleep[0]
                    or self.__valChange[1] != sleep[1]):
                try:
                    self.__sleepF1 = True
                    self.__scheduler.remove_job('dpoff')
                    self.__scheduler.remove_job('dpon')
                except Exception:
                    print(datetime.now().strftime("%H:%M:%S"),
                          "IN config::setSleepTime: jobs don't exist!")
            if (self.__valChange[2] != sleep[2]):
                try:
                    self.__sleepF2 = True
                    self.__scheduler.remove_job("dim")
                except Exception:
                    print(datetime.now().strftime("%H:%M:%S"),
                          "IN config::setSleepTime: jobs don't exist!")

            if (sleep[3] in "False" and self.__sleepF1):
                self.__sleepF1 = False
                hr = int(sleep[0].split(":")[0])
                mi = int(sleep[0].split(":")[1])
                self.__scheduler.add_job(self.displayOff,
                                         "cron",
                                         hour=hr,
                                         minute=mi,
                                         id='dpoff')
                hr = int(sleep[1].split(":")[0])
                mi = int(sleep[1].split(":")[1])
                self.__scheduler.add_job(self.displayOn,
                                         "cron",
                                         hour=hr,
                                         minute=mi,
                                         id='dpon')
            elif (sleep[3] in "True" and not self.__sleepF1):
                self.__sleepF1 = True
                self.__scheduler.remove_job('dpoff')
                self.__scheduler.remove_job('dpon')
                self.__vcgencmd.display_power_on(2)
            if (sleep[4] in "False" and self.__sleepF2):
                self.__sleepF2 = False
                self.__scheduler.add_job(
                    lambda: self.__pwm.set_PWM_dutycycle(18, 0),
                    'interval',
                    minutes=int(sleep[2]),
                    id='dim')
            elif (sleep[4] in "True" and not self.__sleepF2):
                self.__sleepF2 = True
                self.__scheduler.remove_job('dim')
                self.__vcgencmd.display_power_on(2)
                self.__pwm.set_PWM_dutycycle(
                    18, int(self.__fileIO.simpleRead(self.__brightnessFile)))
                self.displayOn()
            self.__valChange = [sleep[0], sleep[1], sleep[2]]

    def displayOn(self):
        self.__vcgencmd.display_power_on(2)
        brightness = self.__fileIO.simpleRead(self.__brightnessFile)
        self.__pwm.set_PWM_dutycycle(18, int(brightness))
        self.__stayOffFlag = False

    def displayOff(self):
        self.__pwm.set_PWM_dutycycle(18, 0)
        self.__stayOffFlag = True

    def readFontFromFile(self):
        font = self.__fileIO.simpleRead(self.__fontFile, multiLine=True)
        font[0] = font[0].split(",")[0]
        font[1] = int(font[1])
        if (len(font) == 2):
            font.append(False)
            font.append(False)
        elif (len(font) == 3):
            if ('Bold' in font[2]):
                font[2] = True
                font.append(False)
            else:
                font[2] = False
                font.append(True)
        elif (len(font) == 4):
            font[2] = True
            font[3] = True
        return font

    def getFont(self, index):
        return self.__fonts[index]

    def getDefFont(self):
        return self.__defFont

    def getMetFont(self):
        return self.__metFont

    def setCSize(self, size):
        self.__cSize = size

    def getSpeed(self):
        speed = self.__fileIO.simpleRead(self.__speedFile)
        if ("Non" in speed):
            return 0
        else:
            return int(speed) * 1000

    def getView(self):
        return self.__fileIO.simpleRead(self.__viewFile, multiLine=True)[0]

    def getAllFonts(self):
        fontName, fontSize, bold, italic = self.readFontFromFile()
        # per-slot size offsets: calendar, date, time, events, news, weather, hour, hour
        offsets = [self.__cSize, 0, 32, -12, -12, -4, -12, -18]
        try:
            self.__defFont = pygame.font.SysFont(self.__defFontName,
                                                 fontSize - 12,
                                                 bold=bold,
                                                 italic=italic)
            self.__metFont = pygame.font.SysFont(self.__defFontName,
                                                 fontSize - 4,
                                                 bold=bold,
                                                 italic=italic)
            for i, offset in enumerate(offsets):
                self.__fonts[i] = pygame.font.SysFont(fontName,
                                                      fontSize + offset,
                                                      bold=bold,
                                                      italic=italic)
        except Exception:
            # pygame.font.Font accepts no bold/italic keywords, so the fallback
            # styles the default font via the setter methods instead.
            def styled(size):
                font = pygame.font.Font(None, size)
                font.set_bold(bold)
                font.set_italic(italic)
                return font

            self.__defFont = styled(fontSize - 12)
            self.__metFont = styled(fontSize - 4)
            for i, offset in enumerate(offsets):
                self.__fonts[i] = styled(fontSize + offset)
        return self.__fonts
Esempio n. 19
0
class Reconciler(object):
    RECONCILE_JOB_ID = "broccoli.worker_reconcile"

    def __init__(
        self,
        worker_config_store: WorkerConfigStore,
        worker_queue: WorkerQueue,
        pause_workers: bool,
    ):
        self.worker_config_store = worker_config_store
        self.worker_queue = worker_queue
        self.pause_workers = pause_workers
        self.reconcile_scheduler = BlockingScheduler()
        self.reconcile_scheduler.add_job(
            self.reconcile, id=self.RECONCILE_JOB_ID, trigger="interval", seconds=10
        )
        self.trigger_scheduler = BackgroundScheduler()

    def start(self):
        # Less verbose logging from apscheduler
        logging.getLogger("apscheduler").setLevel(logging.ERROR)
        # Start the trigger scheduler before the reconcile scheduler; otherwise triggers might not actually be added
        self.trigger_scheduler.start()
        self.reconcile_scheduler.start()

    def stop(self):
        self.trigger_scheduler.shutdown(wait=False)
        self.reconcile_scheduler.shutdown(wait=False)

    def reconcile(self):
        if self.pause_workers:
            logger.info("Workers have been globally paused")
            return
        actual_worker_ids = set(
            map(lambda j: j.id, self.trigger_scheduler.get_jobs())
        )  # type: Set[str]
        desired_workers = self.worker_config_store.get_all()
        desired_worker_ids = set(desired_workers.keys())  # type: Set[str]

        self.remove_workers(
            actual_worker_ids=actual_worker_ids, desired_worker_ids=desired_worker_ids
        )
        self.add_workers(
            actual_worker_ids=actual_worker_ids,
            desired_worker_ids=desired_worker_ids,
            desired_workers=desired_workers,
        )
        self.configure_workers(
            actual_worker_ids=actual_worker_ids,
            desired_worker_ids=desired_worker_ids,
            desired_workers=desired_workers,
        )

    def remove_workers(self, actual_worker_ids: Set[str], desired_worker_ids: Set[str]):
        removed_worker_ids = actual_worker_ids - desired_worker_ids
        if not removed_worker_ids:
            logger.debug("No worker to remove")
            return
        logger.info(f"Going to remove workers with id {removed_worker_ids}")
        for removed_worker_id in removed_worker_ids:
            self.trigger_scheduler.remove_job(removed_worker_id)

    def add_workers(
        self,
        actual_worker_ids: Set[str],
        desired_worker_ids: Set[str],
        desired_workers: Dict[str, WorkerMetadata],
    ):
        added_worker_ids = desired_worker_ids - actual_worker_ids
        if not added_worker_ids:
            logger.debug("No workers to add")
            return
        logger.info(f"Going to add workers with id {added_worker_ids}")
        for added_worker_id in added_worker_ids:
            self.add_worker(added_worker_id, desired_workers)

    def add_worker(
        self, added_worker_id: str, desired_workers: Dict[str, WorkerMetadata]
    ):
        worker_metadata = desired_workers[added_worker_id]

        def _trigger():
            logger.info(f"Enqueuing worker {added_worker_id}")
            self.worker_queue.enqueue(
                WorkerPayload(
                    type="worker",
                    module_name=worker_metadata.module_name,
                    args=worker_metadata.args,
                )
            )

        self.trigger_scheduler.add_job(
            _trigger,
            id=added_worker_id,
            trigger="interval",
            seconds=worker_metadata.interval_seconds,
        )

    def configure_workers(
        self,
        actual_worker_ids: Set[str],
        desired_worker_ids: Set[str],
        desired_workers: Dict[str, WorkerMetadata],
    ):
        same_worker_ids = actual_worker_ids.intersection(desired_worker_ids)
        for worker_id in same_worker_ids:
            desired_interval_seconds = desired_workers[worker_id].interval_seconds
            actual_interval_seconds = self.trigger_scheduler.get_job(
                worker_id
            ).trigger.interval.seconds
            if desired_interval_seconds != actual_interval_seconds:
                logger.info(
                    f"Going to reconfigure worker interval with id {worker_id} to {desired_interval_seconds} "
                    f"seconds"
                )
                self.trigger_scheduler.reschedule_job(
                    job_id=worker_id,
                    trigger="interval",
                    seconds=desired_interval_seconds,
                )
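
The reconcile loop above is just a desired-vs-actual diff over APScheduler job ids. A minimal standalone sketch of the same pattern (the `desired` mapping here is an illustrative stand-in for WorkerConfigStore.get_all(), not part of the original project):

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
scheduler.start()

desired = {"worker-a": 5, "worker-b": 10}  # job id -> interval seconds

def reconcile():
    actual_ids = {job.id for job in scheduler.get_jobs()}
    desired_ids = set(desired)
    for job_id in actual_ids - desired_ids:  # remove stale jobs
        scheduler.remove_job(job_id)
    for job_id in desired_ids - actual_ids:  # add missing jobs
        scheduler.add_job(lambda wid=job_id: print(wid), 'interval',
                          seconds=desired[job_id], id=job_id)

reconcile()

Retiming jobs present on both sides works the same way via reschedule_job, as configure_workers above shows.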
Esempio n. 20
0
class Schedule(Object):
    def __init__(self, client, database, col, jobstore='default'):
        # MongoDB handles used by _init_scheduler, plus the jobstore name
        # used by remove_job/get_schedule_jobs; without these the snippet
        # referenced undefined attributes.
        self.client = client
        self.database = database
        self.col = col
        self.jobstore = jobstore
        self._init_scheduler()

    def _init_scheduler(self):
        '''
        init schedule agent
        '''
        jobstores = {
            'default':
            MongoDBJobStore(collection=self.col,
                            database=self.database,
                            client=self.client),
        }
        executors = {
            'default': ThreadPoolExecutor(10),
            'processpool': ProcessPoolExecutor(2)
        }
        job_defaults = {'coalesce': False, 'max_instances': 4}
        self.scheduler = BackgroundScheduler(jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults)
        self.scheduler.start()

    def add_job(self, *args, **kwargs):
        '''
        add schedule job
        add_job(func, trigger=None, args=None, kwargs=None, id=None, \
            name=None, misfire_grace_time=undefined, coalesce=undefined, \
            max_instances=undefined, next_run_time=undefined, \
            jobstore='default', executor='default', \
            replace_existing=False, **trigger_args)
        '''
        self.scheduler.add_job(*args, **kwargs)
        self.running()

    # def add_interval_job(self, task, job_id=None, args=None, schedule=None):
    def add_interval_job(self, *args, **kwargs):
        '''
        add schedule job
        kwargs:
            args = [] : args for the called function
            kwargs = {} : kwargs for the called function
            id = 'job id' : job id
            trigger : (e.g. ``date``, ``interval`` or ``cron``)
        args:
            task

        day=1, minute=20 is equivalent to year='*', month='*', day=1, week='*', day_of_week='*',
        hour='*', minute=20, second=0.
        The job will then execute on the first day of every month at 20 minutes past
        every hour. The code example below should further illustrate this behavior.
        http://apscheduler.readthedocs.io/en/latest/modules/triggers/cron.html?highlight=add_job
        '''
        job_id = kwargs.get('id')
        if not job_id:
            raise ValueError('Job id not provided.')
        job = self.get_schedule_jobs(job_id)
        if not job:
            self.scheduler.add_job(*args, **kwargs)
        else:
            self.scheduler.reschedule_job(*args, **kwargs)
        self.running()

    def running(self):
        '''
        start scheduler
        '''
        if self.scheduler.state != 1:
            self.scheduler.start()

    def remove_job(self, job_id=None):
        '''
        remove job by id
        '''
        if not job_id:
            raise ValueError('Job id not provided for remove schedule task.')
        if not self.jobstore:
            raise RuntimeError(
                'Job store for persistent schedule tasks not registered yet.')
        if self.get_schedule_jobs(job_id):
            self.scheduler.remove_job(job_id, jobstore=self.jobstore)
        self.running()

    def get_schedule_jobs(self, job_id=None):
        '''
        get schedule jobs
        '''
        jobs = []
        if not self.jobstore:
            raise RuntimeError(
                'Job store for persistent schedule tasks not registered yet.')
        if not job_id:
            jobs = self.scheduler.get_jobs(jobstore=self.jobstore)
        else:
            jobs = self.scheduler.get_job(job_id, jobstore=self.jobstore)
        return jobs
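
A short sketch of the cron-defaults behaviour the docstring above describes, using plain APScheduler (nothing project-specific): omitted fields more significant than the least significant explicit field default to '*', less significant ones to their minimum value.

from apscheduler.schedulers.background import BackgroundScheduler

def task():
    print('monthly maintenance')

scheduler = BackgroundScheduler()
# These two registrations fire at the same times: the first day of every
# month, at 20 minutes past every hour.
scheduler.add_job(task, 'cron', day=1, minute=20, id='short_form')
scheduler.add_job(task, 'cron', year='*', month='*', day=1, week='*',
                  day_of_week='*', hour='*', minute=20, second=0,
                  id='explicit_form')
scheduler.start()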
Esempio n. 21
0
class Stats(RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    _CONTEXTS = {
        'dpset': DPSet,
        'wsgi': WSGIApplication,
    }

    def __init__(self, *args, **kwargs):
        super(Stats, self).__init__(*args, **kwargs)
        # logger settings
        self.logger.setLevel(logging.INFO)
        self.logger.info('Application %s initialized', __name__)

        # routing
        self.mac_to_port = {}

        # dpset instance
        self.dpset = kwargs['dpset']

        # setup wsgi
        wsgi = kwargs['wsgi']
        wsgi.register(StatsRestApi, {
            StatsRestApi.controller_instance_name: self,
            StatsRestApi.dpset_instance_name: self.dpset
        })

        # init scheduler
        self.interval = STATS_INTERVAL
        self.sched = BackgroundScheduler()
        self.sched.start()
        logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
        logging.getLogger('apscheduler.scheduler').propagate = False

        # init place for PNDA watchdog
        try:
            os.mkdir('out')
        except OSError:
            pass

        # stats logs to file
        try:
            os.mkdir('stats')
        except OSError:
            pass

    def set_interval(self, interval):
        self.interval = interval
        self.change_sched_interval(interval)

    def change_sched_interval(self, interval):
        self.logger.debug("Rescheduling stat request to %i seconds", interval)
        for s in self.sched.get_jobs():
            self.logger.debug('rescheduling job %s', s.id)
            it = IntervalTrigger(seconds=interval)
            self.sched.reschedule_job(s.id, trigger=it)

    def send_flow_stats_request(self, datapath):
        # https://osrg.github.io/ryu-book/en/html/traffic_monitor.html
        self.logger.debug('Sending flow stats request to sw: %016x', datapath.id)
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        req = parser.OFPFlowStatsRequest(datapath)

        datapath.send_msg(req)
        with open('stats/flow_stats_req.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' +str(ofproto.OFP_FLOW_STATS_SIZE) + ' ' +str(req) + '\n')

    def send_port_stats_request(self, datapath):
        self.logger.debug('Sending port stats request to sw: %016x', datapath.id)
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
        datapath.send_msg(req)
        with open('stats/port_stats_req.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' +str(ofproto.OFP_PORT_STATS_SIZE) + ' '+ str(req) + '\n')

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def flow_stats_reply_handler(self, ev):
        body = ev.msg.body
        flow_stats = []
        for stat in sorted([flow for flow in body if flow.priority == 1],
                           key=lambda flow: (flow.match['in_port'],
                                             flow.match['eth_dst'])):
            flow_stats.append('table_id=%s '
                         'duration_sec=%d duration_nsec=%d '
                         'priority=%d '
                         'idle_timeout=%d hard_timeout=%d flags=0x%04x '
                         'cookie=%d packet_count=%d byte_count=%d '
                         'match=%s instructions=%s' %
                         (stat.table_id,
                          stat.duration_sec, stat.duration_nsec,
                          stat.priority,
                          stat.idle_timeout, stat.hard_timeout, stat.flags,
                          stat.cookie, stat.packet_count, stat.byte_count,
                          stat.match, stat.instructions))
            data = {
                "origin": "flow_stats",
                "timestamp": time.time(),
                "switch_id": ev.msg.datapath.id,
                "duration_sec": stat.duration_sec,
                "duration_nsec": stat.duration_nsec,
                "src_mac": stat.match['eth_src'],
                "dst_mac": stat.match['eth_dst'],
                "byte_count": stat.byte_count,
                "packet_count": stat.packet_count,
                "in_port": stat.match['in_port']
            }

            with open('out/flow_stats.out', 'a') as file:
                file.write(json.dumps(data) + '\n')

        self.logger.debug('FlowStats for switch %i: %s', ev.msg.datapath.id, flow_stats)
        with open('stats/flow_stats_reply.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' +str(ev.msg.msg_len) + '\n')

    @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
    def port_stats_reply_handler(self, ev):
        body = ev.msg.body
        port_stats = []
        for stat in sorted(body, key=attrgetter('port_no')):
            port_stats.append("port_no=%d "
                              "rx_packets=%d "
                              "rx_bytes=%d "
                              "rx_errors=%d "
                              "tx_packets=%d "
                              "tx_bytes=%d "
                              "tx_errors=%d " %
                              (stat.port_no,
                               stat.rx_packets, stat.rx_bytes, stat.rx_errors,
                               stat.tx_packets, stat.tx_bytes, stat.tx_errors)
                              )
            data = {
                "origin": "port_stats",
                "timestamp": time.time(),
                "switch_id": ev.msg.datapath.id,
                "port_no": stat.port_no,
                "rx_packets": stat.rx_packets,
                "rx_bytes": stat.rx_bytes,
                "rx_errors": stat.rx_errors,
                "tx_packets": stat.tx_packets,
                "tx_bytes": stat.tx_bytes,
                "tx_errors": stat.tx_errors
            }

            with open('out/port_stats.out', 'a') as file:
                file.write(json.dumps(data) + '\n')
        self.logger.debug('PortStats for switch %i: %s', ev.msg.datapath.id, port_stats)
        with open('stats/port_stats_reply.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' + str(ev.msg.msg_len) + '\n')

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        # install table-miss flow entry
        #
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly.  The bug has been fixed in OVS v2.1.0.
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions, 0)

        # Add job to scheduler
        self.logger.debug('Starting scheduler for dp = %i', ev.msg.datapath.id)
        ## flow stats
        self.sched.add_job(self.send_flow_stats_request, 'interval', seconds=STATS_INTERVAL, start_date=START_DATE,
                           args=[ev.msg.datapath])
        ## port stats
        self.sched.add_job(self.send_port_stats_request, 'interval', seconds=STATS_INTERVAL, start_date=START_DATE,
                           args=[ev.msg.datapath])

    def add_flow(self, datapath, priority, match, actions, idle_timeout, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst,idle_timeout=idle_timeout)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst,idle_timeout=idle_timeout)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        # self.logger.info("packet in dpid = %s src = %s dst = %s in_port = %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)

            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, 3, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions, 3)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
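
change_sched_interval above shows the generic pattern for retiming every job on a scheduler; a standalone sketch of just that pattern:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger

scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(lambda: print('poll'), 'interval', seconds=10, id='stats_poll')

def change_interval(seconds):
    # Move every registered job onto a new interval trigger.
    for job in scheduler.get_jobs():
        scheduler.reschedule_job(job.id, trigger=IntervalTrigger(seconds=seconds))

change_interval(30)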
Esempio n. 22
0
class SchedulerEdge(object):

    core = None

    def __init__(self, parent):  # instantiate the object and start the scheduler

        self.core = parent
        self.scheduler = BackgroundScheduler(
        )  # create a background scheduler
        self.scheduler.start()  # start the scheduler

        #self.create_job_check_persistence()

        #self.check_scheduler_reactivave()

    def add_job(self, jsonObject):  # create a new task in the scheduler
        #print(type(jsonObject['status']))
        jsonObject['collect_to_rule'] = False
        if (jsonObject['status'] == 'True' or jsonObject['status'] == True):
            if jsonObject['id'] != '0':
                #print("ADDED SENSOR")
                try:
                    self.scheduler.add_job(self.function,
                                           jsonObject['modo'],
                                           second=jsonObject['second'],
                                           minute=jsonObject['minute'],
                                           hour=jsonObject['hour'],
                                           day=jsonObject['day'],
                                           month=jsonObject['month'],
                                           year=jsonObject['year'],
                                           id=str(jsonObject['id']),
                                           args=[jsonObject],
                                           max_instances=50,
                                           misfire_grace_time=120)
                except Exception:  # used when a task with this id already exists and must be rescheduled
                    self.scheduler.reschedule_job(jsonObject['id'],
                                                  trigger='cron',
                                                  second=jsonObject['second'],
                                                  minute=jsonObject['minute'],
                                                  hour=jsonObject['hour'],
                                                  day=jsonObject['day'],
                                                  month=jsonObject['month'],
                                                  year=jsonObject['year'])

            else:
                self.scheduler.add_job(self.check_persistence,
                                       jsonObject['modo'],
                                       second=jsonObject['second'],
                                       minute=jsonObject['minute'],
                                       hour=jsonObject['hour'],
                                       day=jsonObject['day'],
                                       month=jsonObject['month'],
                                       year=jsonObject['year'],
                                       id=jsonObject['id'],
                                       max_instances=1,
                                       misfire_grace_time=120)

    def remove_job(
        self, jsonObject
    ):  # jsonObject['id'] is the id of the sensor/actuator to remove from the CRON
        self.scheduler.remove_job(jsonObject['id'])

    def function(self, jsonObject):  # jsonObject is the JSON passed as argument

        #print(jsonObject['id'])
        #print("SENSOR ADD"+jsonObject['id_sensor'])
        object_events = Event_Treatment(self.core)
        object_events.event(jsonObject)

    def check_persistence(self):  # TODO: revise
        print("Attempted to publish the persistence data")
        persistence_publisher = Publisher(self.core)
        persistence_publisher.start()

#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------

# Adds a TASK to the CRON, making it responsible for publishing to the context
# whenever the gathering module failed to do so.

    def create_job_check_persistence(
            self):  # Builds a JSON in the exact format the SCHEDULER
        # will use. Any preprocessing should happen here.
        job = {}

        job['modo'] = "cron"
        job['id'] = "0"
        job['status'] = "True"

        job['second'] = "*/30"
        #job['second'] = "0"
        #job['minute']  = "*/10"
        job['minute'] = "*"
        job['hour'] = "*"
        job['day'] = "*"
        job['week'] = "*"
        job['month'] = "*"
        job['year'] = "*"

        self.add_job(job)

    def check_scheduler_reactivave(self):
        # print("check_scheduler_reactivave")

        try:
            #print("check_scheduler_reactivave   TRY")
            jsonSchedules = self.core.API_access("get", "schedules").json()
            print(jsonSchedules)

            for schedule in jsonSchedules:
                schedule['modo'] = 'cron'
                print(schedule)
                self.add_job(schedule)

        except Exception as inst:
            #print("check_scheduler_reactivave   EXCEPTION")
            #print(type(inst))
            time.sleep(10)
            self.check_scheduler_reactivave()

        except:
            print("MAY BE AN ACCESS ERROR DUE TO AN INVALID TOKEN")
Esempio n. 23
0
class SchedulerCollectData():
    def __init__(self, exe_function, live_freq, history_freq):
        # ex: {"4":{"ports":[280,281],"mempools":[1,2]}}
        # {"4":{"ports":[280,281]}}
        self.live_dev = {
        }  #{1:{'devices':[{'device_id':1}]}, 2:{'devices':[{'device_id':2}]}} #{d1:{rec_message}, d2:{rec_message}}
        self.all_dev = {
        }  #{4:{'devices':[{'device_id':4, 'result':0}]}, 5:{'devices':[{'device_id':5, 'result':0}]},6:{'devices':[{'device_id':6, 'result':0}]}, 7:{'devices':[{'device_id':7, 'result':0}]}} # {d1:{rec_message}, d2:{rec_message}}
        self.event_dev = {}
        self.scheduler = BackgroundScheduler({
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '30'
            }
        })
        self.exe_function = exe_function
        self.pool = ThreadPool(processes=multiprocessing.cpu_count())
        self.live_freq = int(live_freq)
        self.history_freq = int(history_freq)
        self.send_live = {}
        self.jobdir = '/opt/observium/qrmobservium/common/scheduler/jobs/livedata'
        self.scheduler.add_job(self._period_function,
                               trigger='interval',
                               args=('live', ),
                               seconds=live_freq,
                               id="live_period")
        self.scheduler.add_job(self._job_function,
                               trigger='interval',
                               args=('job', ),
                               seconds=3,
                               id="job_poll")
        self.scheduler.start()

    def start(self):
        try:
            # This is here to simulate application activity (which keeps the main thread alive).
            while True:
                time.sleep(1)
        except (KeyboardInterrupt, SystemExit):
            self.scheduler.shutdown(
            )  # Not strictly necessary if daemonic mode is enabled but should be done if possible

    def add_dev(self, msg):
        #msg = {"4":{"ports":[280,281]}}
        for dev_id in msg:
            print('add_dev : %s' % dev_id)
            if dev_id not in self.live_dev:
                self.live_dev[dev_id] = msg[dev_id].copy()
                print('live_list %s' % self.live_dev)
        #dev_id = int(msg['devices'][0]['device_id'])
        #if dev_id not in self.live_dev:
        #    msg.pop('get_event_devices', None)
        #    self.all_dev[dev_id] = msg.copy()
        #print "all list:" + simplejson.dumps(self.all_dev)

    def remove_dev(self, msg):
        dev_id = int(msg['devices'][0]['device_id'])
        self.all_dev.pop(dev_id, None)
        self.live_dev.pop(dev_id, None)
        self.event_dev.pop(dev_id, None)

    def add_event_dev(self, msg):
        self.event_dev = {}
        for dev in msg["devices"]:
            self.event_dev[dev["device_id"]] = dev
        #print "event list:" + simplejson.dumps(self.event_dev)

    def remove_event_dev(self, msg):
        dev_id = int(msg['devices'][0]['device_id'])
        self.event_dev.pop(dev_id, None)

    def _period_function(self, schedule_type):
        global d_time
        results = []
        print('debug %s' % multiprocessing.cpu_count())
        if schedule_type == "live":
            #print "enter live_function:" + str(datetime.datetime.now())
            d_time = int(time.time())
            for key, value in self.live_dev.items():
                print('_period %s %s d_time %s' % (key, value, d_time))
                #value =self.live_dev.values()
                for port_id in value['ports']:
                    #print 'kk %s' % port_id
                    self.pool.apply_async(self.exe_function,
                                          args=(
                                              key,
                                              port_id,
                                          ))

    def _job_function(self, schedule_type):
        if not os.path.isdir(self.jobdir):
            print('path not exist')
            return
        device_ids = os.listdir(self.jobdir)
        if len(device_ids) <= 0:
            print('no devices')
            # iterate over copies: the dict and lists are mutated while looping
            for device_id in list(self.live_dev.keys()):
                for port in list(self.live_dev[device_id]['ports']):
                    self.live_dev[device_id]['ports'].remove(port)
                del self.live_dev[device_id]
            return
        for device_id in list(self.live_dev.keys()):
            if device_id not in device_ids:
                self.live_dev.pop(device_id, None)
        for device_id in device_ids:
            msg = {}
            constructport = {}
            portlist = []
            ports = os.listdir(self.jobdir + "/" + device_id)
            if len(ports) <= 0:
                if not device_id in self.live_dev:
                    return
                for port in list(self.live_dev[device_id]['ports']):
                    if port not in ports:
                        self.live_dev[device_id]['ports'].remove(port)
                        print('del ports %s' % self.live_dev[device_id]['ports'])
                return
            portlist = [int(port) for port in ports]
            #msg[device_id] = constructport
            if device_id in self.live_dev:
                cur_ports = self.live_dev[device_id]['ports']
            else:
                cur_ports = []
            diff_ports = list(set(portlist) - set(cur_ports))
            if len(diff_ports) == 0 and len(portlist) >= len(cur_ports):
                continue
            for port in diff_ports:
                if port not in portlist:
                    portlist.append(port)
            constructport['ports'] = portlist
            msg[device_id] = constructport
            self.live_dev[device_id] = msg[device_id].copy()
            print('_job_function live_list %s' % self.live_dev)

    def change_live_freq(self, sec):
        self.live_freq = sec
        self.scheduler.reschedule_job(job_id="live_period",
                                      trigger='interval',
                                      seconds=self.live_freq)

    def change_history_freq(self, minute):
        self.history_freq = minute
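
The configuration-dict form used in the constructor above is equivalent to handing BackgroundScheduler an explicit executors mapping; a minimal sketch:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor

# Same effect as the 'apscheduler.executors.default' dict entry above:
# a default executor backed by a 30-worker thread pool.
scheduler = BackgroundScheduler(
    executors={'default': ThreadPoolExecutor(max_workers=30)})
scheduler.start()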
Esempio n. 24
0
class Gardener:
    """
    Gardener manages garden according schedule and collected sensor data.
     * Garden - Controls HW I/O - simple stateless servant for single thread use.
       * sensors: temperature (TODO: water level, light density, ...)
       * relays: pump, fan, fogger
     * Records - Via scheduler collects and store sensors data + current garden state.

     * Web server shows
       * current garden state (TODO)
       * light version of sensor data history
       * next planned maintenance action (TODO)
       * buttons for manipulation with garden
    """
    def __init__(self):
        self.garden = Garden()
        self.records = Records(sensors=self.garden.sensors)
        self.scheduler = BackgroundScheduler({
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '1'
            }
        })

    def reschedule_job(self, job_id):
        period_minutes = self.compute_period(
            job_id, self.garden.city_temperature.value)
        last_job_run = self.garden.get_last_job_run(job_id)
        next_job_run = max((self.get_asap_schedule(),
                            last_job_run + timedelta(minutes=period_minutes)))
        self.scheduler.reschedule_job(job_id,
                                      trigger='cron',
                                      minute="*/{}".format(period_minutes),
                                      start_date=str(next_job_run))

    def sensors_refresh(self):
        old_temperature = self.garden.city_temperature.value
        self.garden.sensors_refresh()
        new_temperature = self.garden.city_temperature.value

        if old_temperature != new_temperature and new_temperature:
            self.reschedule_job('FOGGING')
            self.reschedule_job('WATERING')

    def send_sms_report(self):
        message = 'I am alive.'
        for sensor in self.garden.sensors:
            message += " {}:{}".format(sensor.name, str(sensor.value))
        message += " f:{}/{} w:{}/{}".format(
            self.get_job_period("FOGGING"),
            self.garden.get_job_run_count("FOGGING"),
            self.get_job_period("WATERING"),
            self.garden.get_job_run_count("WATERING"))
        utils.sms.send_sms(message)

    def working_loop(self):
        # shared cross threads
        threading.gardener = self

        # default schedule
        cron_params = {
            'trigger': 'cron',
            'misfire_grace_time': MISFIRE_GRACE_TIME
        }
        self.scheduler.add_job(self.garden.watering, trigger='date')
        self.scheduler.add_job(self.garden.watering,
                               minute='*/20',
                               id='WATERING',
                               **cron_params)
        self.scheduler.add_job(self.garden.fogging,
                               minute='*/3',
                               id='FOGGING',
                               **cron_params)

        # sensors maintenance
        self.scheduler.add_job(self.sensors_refresh,
                               minute='*/10',
                               **cron_params)
        self.scheduler.add_job(self.records.write_values,
                               minute='*/10',
                               kwargs={'file': config.SensorData.FULL_FILE},
                               **cron_params)
        self.scheduler.add_job(self.records.write_values,
                               hour='*',
                               kwargs={'file': config.SensorData.WEB_FILE},
                               **cron_params)
        self.scheduler.add_job(
            self.records.trim_records,
            week='*',  # show on web only latest 30 days
            kwargs={
                'file': config.SensorData.WEB_FILE,
                'count': 24 * 7 * 4
            },
            **cron_params)
        self.scheduler.add_job(self.send_sms_report, hour='12', **cron_params)

        # TODO: create more oxygen when high temperature via extra long pumping cycle?

        # network maintenance
        self.scheduler.add_job(utils.network.check_and_fix,
                               hour='*',
                               kwargs={
                                   'address': config.RouterAddress,
                                   'network': 'wlan0'
                               },
                               **cron_params)
        self.scheduler.add_job(utils.system.reboot, hour='0', **cron_params)

        logging.info('Starting scheduler.')
        self.scheduler.start()

        # web server needs main thread for its signal handling
        logging.info('Starting web server.')
        web_server.run(**config.WebServer)

        self.scheduler.shutdown()

    def get_job_period(self, job_id):
        trigger = self.scheduler.get_job(job_id).trigger
        period = re.search(r"cron\[minute='\*/(\d+)'\]", str(trigger))
        return int(period.group(1)) if period else 0

    def get_job_next_run_time(self, job_id):
        return self.scheduler.get_job(job_id).next_run_time

    def start_job(self, job_id):
        # FIXME
        job = self.scheduler.get_job(job_id)

    @staticmethod
    def get_asap_schedule():
        return datetime.now() + timedelta(seconds=2)

    @staticmethod
    def compute_period(job_id, temperature):

        if job_id == 'FOGGING':
            return int(2 * 4 * 60 /
                       (temperature -
                        4)**1.5) if 4 < temperature < 27 else INFINITE_MINUTES
        elif job_id == 'WATERING':
            return int(4 * 24 * 60 /
                       (temperature -
                        4)**2) if 4 < temperature < 27 else INFINITE_MINUTES
        else:
            assert 0
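
For a concrete feel of compute_period: at a city temperature of 20 °C (so temperature - 4 == 16), the formulas above give

print(int(2 * 4 * 60 / (20 - 4) ** 1.5))  # FOGGING:  480 / 64.0  -> 7 minutes
print(int(4 * 24 * 60 / (20 - 4) ** 2))   # WATERING: 5760 / 256  -> 22 minutes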
Esempio n. 25
0
class Scheduler(Flask):

    days = {
        "0": "sun",
        "1": "mon",
        "2": "tue",
        "3": "wed",
        "4": "thu",
        "5": "fri",
        "6": "sat",
        "7": "sun",
        "*": "*",
    }

    seconds = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}

    def __init__(self):
        super().__init__(__name__)
        with open(Path.cwd().parent / "setup" / "scheduler.json", "r") as file:
            self.settings = load(file)
        dictConfig(self.settings["logging"])
        self.configure_scheduler()
        self.register_routes()

    @staticmethod
    def aps_date(date):
        if not date:
            return
        date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
        return datetime.strftime(date, "%Y-%m-%d %H:%M:%S")

    def configure_scheduler(self):
        self.scheduler = BackgroundScheduler(self.settings["config"])
        self.scheduler.start()

    def register_routes(self):
        @self.route("/delete_job/<job_id>", methods=["POST"])
        def delete_job(job_id):
            if self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)
            return jsonify(True)

        @self.route("/next_runtime/<task_id>")
        def next_runtime(task_id):
            job = self.scheduler.get_job(task_id)
            if job and job.next_run_time:
                return jsonify(job.next_run_time.strftime("%Y-%m-%d %H:%M:%S"))
            return jsonify("Not Scheduled")

        @self.route("/schedule", methods=["POST"])
        def schedule():
            if request.json["mode"] in ("resume", "schedule"):
                result = self.schedule_task(request.json["task"])
                if not result:
                    return jsonify({"alert": "Cannot schedule in the past."})
                else:
                    return jsonify({
                        "response": "Task resumed.",
                        "active": True
                    })
            else:
                try:
                    self.scheduler.pause_job(request.json["task"]["id"])
                    return jsonify({"response": "Task paused."})
                except JobLookupError:
                    return jsonify(
                        {"alert": "There is no such job scheduled."})

        @self.route("/time_left/<task_id>")
        def time_left(task_id):
            job = self.scheduler.get_job(task_id)
            if job and job.next_run_time:
                delta = job.next_run_time.replace(tzinfo=None) - datetime.now()
                hours, remainder = divmod(delta.seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                days = f"{delta.days} days, " if delta.days else ""
                return jsonify(f"{days}{hours}h:{minutes}m:{seconds}s")
            return jsonify("Not Scheduled")

    @staticmethod
    def run_service(task_id):
        post(
            f"{getenv('ENMS_ADDR')}/rest/run_task",
            json=task_id,
            auth=HTTPBasicAuth(getenv("ENMS_USER"), getenv("ENMS_PASSWORD")),
            verify=int(getenv("VERIFY_CERTIFICATE", 1)),
        )

    def schedule_task(self, task):
        if task["scheduling_mode"] == "cron":
            crontab = task["crontab_expression"].split()
            crontab[-1] = ",".join(self.days[day]
                                   for day in crontab[-1].split(","))
            trigger = {"trigger": CronTrigger.from_crontab(" ".join(crontab))}
        elif task["frequency"]:
            trigger = {
                "trigger":
                "interval",
                "start_date":
                self.aps_date(task["start_date"]),
                "end_date":
                self.aps_date(task["end_date"]),
                "seconds":
                int(task["frequency"]) * self.seconds[task["frequency_unit"]],
            }
        else:
            trigger = {
                "trigger": "date",
                "run_date": self.aps_date(task["start_date"])
            }
        if not self.scheduler.get_job(task["id"]):
            job = self.scheduler.add_job(
                id=str(task["id"]),
                replace_existing=True,
                func=self.run_service,
                args=[task["id"]],
                **trigger,
            )
        else:
            job = self.scheduler.reschedule_job(str(task["id"]), **trigger)
        return job.next_run_time > datetime.now(job.next_run_time.tzinfo)
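
schedule_task's cron branch rewrites numeric weekday fields into names before parsing. The conversion in isolation looks like this (the crontab expression is illustrative):

from apscheduler.triggers.cron import CronTrigger

days = {"0": "sun", "1": "mon", "2": "tue", "3": "wed",
        "4": "thu", "5": "fri", "6": "sat", "7": "sun", "*": "*"}

crontab = "30 8 * * 1,5".split()  # 08:30 on Monday and Friday
crontab[-1] = ",".join(days[d] for d in crontab[-1].split(","))
trigger = CronTrigger.from_crontab(" ".join(crontab))
print(trigger)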
Esempio n. 26
0
class Collector:
    def __init__(self):
        self.logger = get_logger('collector')
        self.device_info = None
        self.job_order_queue = None

        self.scheduler = BackgroundScheduler(timezone="Asia/Seoul")
        self.scheduler.start()

        self.data = dict()
        self.data['__last_fetch'] = dict()
        self.queue = deque(maxlen=60)

    # =========================================================================
    def add_job_schedule_by_template_file(self, file_path):
        with open(file_path, 'r') as f:
            templates = yaml.safe_load(f)
        for key in templates:
            name = key
            seconds, code, template, use, description = operator.itemgetter(
                'seconds', 'code', 'template', 'use', 'description'
            )(templates[key])

            comm_type = templates[key]['comm']['type']
            host, port = operator.itemgetter('host', 'port')(
                templates[key]['comm']['setting'])

            kw_argument = dict(code=code,
                               name=name,
                               interval_second=seconds,
                               template=template,
                               ip=host,
                               port=port,
                               description=description,
                               use=use,
                               comm_type=comm_type)

            self.add_job_schedule(**kw_argument)

    # =========================================================================
    @staticmethod
    def get_python_module(code, name, ip, port):
        def indent(text, amount, ch=' '):
            import textwrap
            return textwrap.indent(text, amount * ch)

        code = CONTEXT.format(ip=ip, port=port, code=indent(code, 4))
        module = types.ModuleType(name)
        exec(code, module.__dict__)
        return module

    # =========================================================================
    def add_job_schedule(self, code: str, name: str, interval_second: int,
                         template, ip, port, description, use, comm_type):

        module = self.get_python_module(code, name, ip, port)
        parameters = name, module, template
        comm = {'comm_typ': comm_type, 'setting': {'host': ip, 'port': port}}
        self.scheduler.pause()
        try:
            self.scheduler.add_job(
                self.request_data,
                args=parameters,
                kwargs={'code': code,
                        'use': use,
                        'description': description,
                        'comm': comm,
                        'interval_second': interval_second},
                id=name, trigger='interval', seconds=interval_second)
        finally:
            self.scheduler.resume()

    # =========================================================================
    def remove_job_schedule(self, _id: str):
        self.scheduler.remove_job(_id)
        del self.data[_id]
        return

    # =========================================================================
    def modify_job_schedule(self, _id, seconds):
        self.scheduler.reschedule_job(_id, trigger='interval', seconds=seconds)

    # =========================================================================
    def request_data(self, name, module, template, **kwargs):
        data = module._main()
        result = get_json_data_with_template(data, template=template)
        result['hex'] = data.hex(' ')

        if name not in self.data:
            self.data[name] = deque(maxlen=60)
        self.data['__last_fetch'][name] = [result]
        self.data[name].append(result)
        self.data[name].rotate()
        return result

    # =========================================================================
    def get_schedule_jobs(self):
        jobs = self.scheduler.get_jobs()
        if not jobs:
            return jobs
        result = list()
        for job in jobs:
            _, _, template = job.args
            code, description, use, comm, seconds = operator.itemgetter(
                'code', 'description', 'use', 'comm',
                'interval_second')(job.kwargs)
            result.append(
                dict(id=job.id, code=code, template=template,
                     description=description, use=use, comm=comm,
                     seconds=seconds))
        return result
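
get_python_module relies on a small but handy technique: compiling a code string into a fresh module object. Stripped of the project-specific CONTEXT wrapper, it reduces to the following sketch (the source string is illustrative; in the Collector it comes from the YAML template):

import types

source = "def _main():\n    return b'\\x01\\x02'"
module = types.ModuleType("poller")
exec(source, module.__dict__)
print(module._main())  # b'\x01\x02'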
Esempio n. 27
0
class SchedulerService(rpyc.Service):
    def __init__(self, **config):
        self._scheduler = BackgroundScheduler()
        self._scheduler.configure(**config)
        self._scheduler.start()
        self.logger = logging.getLogger("Heartbeat.core")
        self.logger.info("Heartbeat Core Initalized")

    def on_connect(self, conn):
        # code that runs when a connection is created
        # (to init the service, if needed)
        self.logger.info("----------Begin New Client----------")
        self.logger.info(conn)
        self.logger.info("----------End New Client----------")

    def on_disconnect(self, conn):
        # code that runs after the connection has already closed
        # (to finalize the service, if needed)
        self.logger.info("----------Begin Goodbye Client----------")
        self.logger.info(conn)
        self.logger.info("----------End Goodbye Client----------")

    def exposed_add_job(self, func, *args, **kwargs):
        self.logger.info("----------Begin New Job----------")
        self.logger.info("Function: %s", str(func))
        self.logger.info("*args: %s", str(args))
        self.logger.info("**kwargs: %s", str(dict(kwargs)))
        self.logger.info("----------Eng New Job----------")
        return self._scheduler.add_job(func, *args, **kwargs)

    def exposed_modify_job(self, job_id, jobstore=None, **changes):
        return self._scheduler.modify_job(job_id, jobstore, **changes)

    def exposed_reschedule_job(self,
                               job_id,
                               jobstore=None,
                               trigger=None,
                               **trigger_args):
        return self._scheduler.reschedule_job(job_id, jobstore, trigger,
                                              **trigger_args)

    def exposed_pause_job(self, job_id, jobstore=None):
        return self._scheduler.pause_job(job_id, jobstore)

    def exposed_resume_job(self, job_id, jobstore=None):
        return self._scheduler.resume_job(job_id, jobstore)

    def exposed_remove_job(self, job_id, jobstore=None):
        self._scheduler.remove_job(job_id, jobstore)

    def exposed_get_job(self, job_id, jobstore=None):
        return self._scheduler.get_job(job_id, jobstore=jobstore)

    def exposed_get_jobs(self, jobstore=None):
        results = self._scheduler.get_jobs(jobstore)
        return results

    def exposed_get_tasks(self):
        """Return a list of schedule-able function"""
        tasks = []
        for module_file in os.listdir(
                os.path.join(os.path.dirname(__file__), "task")):
            if module_file == "__init__.py" or module_file[-3:] != ".py":
                continue
            module_name = "server.task.{}".format(module_file[:-3])
            module = importlib.import_module(module_name)
            if not hasattr(module, "__all__"):
                continue
            for function_name in module.__all__:
                function = getattr(module, function_name)
                if not callable(function):
                    continue
                parameters = inspect.signature(function).parameters
                parameters_str = ", ".join(
                    [str(val) for key, val in parameters.items()])
                tasks.append("{}:{}({})".format(module_name, function_name,
                                                parameters_str))
        return tasks
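
On the client side, rpyc strips the exposed_ prefix, so the service is driven through conn.root. A hedged sketch; the host, port, and ThreadedServer wiring are assumptions, not part of the snippet above:

import rpyc

# Server side (sketch): ThreadedServer(SchedulerService(), port=18861).start()
conn = rpyc.connect("localhost", 18861)
print(conn.root.get_tasks())  # calls exposed_get_tasks()
print(conn.root.get_jobs())   # calls exposed_get_jobs()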
Esempio n. 28
0
class SpecificTimeReporter(object):
    """
    This class is used to get the real-time market data at specific time everyday from yahoo finance database,
    the accessed data will not be saved at local,
    please use this with my Stock Data Reader class
    """
    def __init__(self, function):
        """
        :param function: function of the arranged job, in this case,
        it should be the getCurrentMarketData function
        """
        self._scheduler = None
        self.function = function
        self.count = 1
        self._all_job = {}
        self.start()

    def start(self):
        """
        start the reporter
        :return: None
        """
        self._scheduler = BackgroundScheduler()
        self._scheduler.start()

    def convertInt2Time(self, hour, minute, second):
        """
        You do not need to call this method, you can treat this as a private method
        :param hour: integer ranging from 0 to 23
        :param minute: integer ranging from 0 to 59
        :param second: integer ranging from 0 to 59
        :return: string format of time
        """
        ans = ""
        if hour < 10:
            ans = ans + "0" + str(hour)
        else:
            ans = ans + str(hour)
        if minute < 10:
            ans = ans + "0" + str(minute)
        else:
            ans = ans + str(minute)
        if second < 10:
            ans = ans + "0" + str(second)
        else:
            ans = ans + str(second)
        return ans

    def addJob(self, hour, minute, second, *args):
        """
        add a reporter
        :param hour: integer ranging from 0 to 23
        :param minute: integer ranging from 0 to 59
        :param second: integer ranging from 0 to 59
        :param args: tickerList,like:["AAPL","IBM","JPM"]
        :return: None
        """
        timeString = self.convertInt2Time(hour, minute, second)

        if timeString not in self._all_job:
            self._all_job[timeString] = str(self.count)
            self._scheduler.add_job(self.function,
                                    trigger='cron',
                                    hour=hour,
                                    minute=minute,
                                    second=second,
                                    args=args,
                                    id=str(self.count))
            self.count = self.count + 1
        else:
            self._scheduler.reschedule_job(self._all_job[timeString],
                                           trigger='cron',
                                           hour=hour,
                                           minute=minute,
                                           second=second)

    def removeJob(self, hour, minute, second):
        """
        remove a reporter
        :param hour: integer ranging from 0 to 23
        :param minute: integer ranging from 0 to 59
        :param second: integer ranging from 0 to 59
        :return: None
        """
        timeString = self.convertInt2Time(hour, minute, second)
        if timeString not in self._all_job:
            warnings.warn("Job not found!")
        else:
            self._scheduler.remove_job(self._all_job[timeString])

    def removeAllJobs(self):
        """
        remove all reporters
        :return: None
        """
        self._scheduler.remove_all_jobs()

    def pause(self):
        """
        pause all reporters
        :return: None
        """
        self._scheduler.pause()

    def resume(self):
        """
        resume the paused reporters
        :return: None
        """
        self._scheduler.resume()

    def getAllJobs(self):
        """
        print the information of all reporters
        :return: None
        """
        self._scheduler.print_jobs()

    def shutDown(self):
        """
        shut down all reporters
        :return: None
        """
        self._scheduler.shutdown()
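
A usage sketch for SpecificTimeReporter; getCurrentMarketData here is a placeholder for the callback the constructor docstring mentions:

def getCurrentMarketData(tickerList):
    print("fetching", tickerList)  # stand-in for the real data fetch

reporter = SpecificTimeReporter(getCurrentMarketData)
reporter.addJob(9, 30, 0, ["AAPL", "IBM", "JPM"])  # fires daily at 09:30:00
reporter.getAllJobs()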
Esempio n. 29
0
class Scheduler(object):
    def __init__(self):
        self.cron_job_args = [
            'year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute',
            'second', 'start_date', 'end_date'
        ]
        self.scheduler = BackgroundScheduler(
            timezone=getattr(config, 'TIME_ZONE', 'Asia/Shanghai'))
        self.jobs = {}
        self.already_init = False

    def init(self):
        if not self.already_init:
            for job in Job.query.filter_by(enabled=True).all():
                self.add_job(job)
            self.scheduler.start()
            self.already_init = True

    def __parse_args(self, trigger, trigger_args):
        if trigger == 'cron':
            args = {
                k: v
                for k, v in zip(self.cron_job_args, trigger_args.split(';'))
                if v
            }
            # day_of_week needs special handling: APScheduler uses 0 for
            # Monday, which differs from the convention described on the page
            day_of_week = int(
                args['day_of_week']) if args.get('day_of_week') else None
            if day_of_week == 0:
                args['day_of_week'] = 6
            elif day_of_week is not None:
                args['day_of_week'] = day_of_week - 1
            return args
        elif trigger == 'interval':
            return {'seconds': int(trigger_args)}
        elif trigger == 'date':
            return {'run_date': trigger_args}
        else:
            raise ValueError('Unknown scheduling strategy: %r' % trigger)

    def add_job(self, job):
        job_id = str(job.id)
        args = self.__parse_args(job.trigger, job.trigger_args)
        instance = self.scheduler.add_job(agent,
                                          job.trigger,
                                          id=job_id,
                                          args=(job.id, job.command_user,
                                                job.command, job.targets),
                                          **args)
        self.jobs[job_id] = instance

    def valid_job_trigger(self, trigger, trigger_args):
        try:
            args = self.__parse_args(trigger, trigger_args)
            job = self.scheduler.add_job(agent,
                                         trigger,
                                         args=(None, None, None, None),
                                         next_run_time=None,
                                         **args)
            job.remove()
            return True
        except ValueError:
            return False

    def remove_job(self, job_id):
        job_id = str(job_id)
        if self.scheduler.get_job(job_id):
            self.scheduler.remove_job(job_id)

    def update_job(self, job):
        job_id = str(job.id)
        if self.scheduler.get_job(job_id):
            args = self.__parse_args(job.trigger, job.trigger_args)
            self.scheduler.reschedule_job(job_id, trigger=job.trigger, **args)
        elif job.enabled:
            self.add_job(job)
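
A standalone sketch of the weekday remapping in __parse_args above. The UI convention is an assumption (0 = Sunday, 1 = Monday, ... 6 = Saturday), while APScheduler's day_of_week field uses 0 = Monday ... 6 = Sunday:

def ui_weekday_to_apscheduler(day):
    # Map the assumed UI convention (0 = Sunday) onto APScheduler's
    # day_of_week field (0 = Monday ... 6 = Sunday).
    return 6 if day == 0 else day - 1

assert ui_weekday_to_apscheduler(0) == 6  # Sunday
assert ui_weekday_to_apscheduler(1) == 0  # Monday
assert ui_weekday_to_apscheduler(6) == 5  # Saturday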
Esempio n. 30
0
class MelsecplcWorker(BaseWorker):

    melsecPlc_Setting = settings['manager']['workers']['melsecplc']
    gateway_id = None
    device_name = None
    if 'topic_subscription' in melsecPlc_Setting:
        _, _, gateway_id, device_name, cmd_type, cmd = melsecPlc_Setting[
            'topic_subscription'].split('/')

    # Default values
    devices = {}
    # Payload that should be sent when the device is available
    available_payload = 'home'  # type: str
    # Payload that should be sent when the device is unavailable
    unavailable_payload = 'not_home'  # type: str
    # After what time (in seconds) we should report that the device is available (default: 0 seconds)
    available_timeout = 0  # type: float
    # After what time (in seconds) we should report that the device is unavailable (default: 60 seconds)
    unavailable_timeout = 60  # type: float
    scan_timeout = 10.  # type: float
    scan_passive = True  # type: str or bool
    start_to_collect = False  # Global flag: whether collection has started
    flag_started = False  # Flag to control BleDeviceStatus object creation
    count = 0

    # Defined publish MQTT topics
    ReadData_Topic = "/IEW/{gateway}/{device}/ReplyData"
    HeartBeat_Topic = "/IEW/{gateway}/{device}/Status/HeartBeat"
    Alarm_Topic = "/IEW/{gateway}/{device}/Status/Alarm"

    ReadData_job_id = '{}_interval_job'.format("ReadData")
    Version = "1.0"
    Status = "Init"

    Job_queue = queue(maxsize=10)

    _LOGGER.info("Version = " + Version)
    _LOGGER.info("MelsecPLC Class Initial Funished")
    _LOGGER.info("Setting Gateway_ID  = " + gateway_id + ", Device_ID = " +
                 device_name)
    _LOGGER.info("ReadData_Topic = " + ReadData_Topic)
    _LOGGER.info("HeartBeat_Topic = " + HeartBeat_Topic)

    def __init__(self, command_timeout, **kwargs):

        super(MelsecplcWorker, self).__init__(command_timeout, **kwargs)
        self._scheduler = BackgroundScheduler(timezone=utc)
        self._scheduler.add_job(self.Read_PLC_Data,
                                'interval',
                                seconds=10,
                                id=self.ReadData_job_id)
        self._scheduler.start()

        # Silence APScheduler's default executor logging
        logging.getLogger('apscheduler.executors.default').propagate = False

        self.Status = "Init"
        _LOGGER.info("MelsecplcWorker --> starts = " + self.Status)

        self.report_alarm('0000', 'Info', 'MelsecplcWorker is Initial')

    def run(self, mqtt):

        while True:
            time.sleep(1)
            if self.Job_queue.empty() == True:
                continue
            else:
                try:
                    SendOutMQTT = self.Job_queue.get()
                    mqtt.publish(SendOutMQTT)
                except Exception as ee:
                    logger.log_exception(
                        _LOGGER, 'MelsecplcWorker --> Publish Error Msg : %s',
                        str(ee))

    def create_devices(self):
        if not self.start_to_collect:
            self.last_status = []

            for device_name, dev_info in self.devices.items():
                _LOGGER.info(
                    "MelsecplcWorker --> create_devices : device_name = " +
                    device_name)
                for d in dev_info:
                    _LOGGER.info(
                        "MelsecplcWorker --> create_devices : ip_addr = " +
                        d['IP_ADDR'])
                    _LOGGER.info(
                        "MelsecplcWorker --> create_devices : port_id = " +
                        d['PORT_ID'])
                    self.last_status = [
                        PLCDevice(self, d['IP_ADDR'], d['PORT_ID'],
                                  device_name)
                    ]

            self.start_to_collect = True

    def set_stop_flag(self):
        self.start_to_collect = False
        self.flag_started = False

    def Read_PLC_Data(self):

        if self.start_to_collect and self.flag_started:
            self.count += 1
            if self.count > 65535:
                self.count = 1

            _LOGGER.debug("MelsecplcWorker --> ReadPLC Data enters count = " +
                          str(self.count))
            sendout_topic = self.ReadData_Topic.replace(
                "{gateway}", self.gateway_id).replace("{device}",
                                                      self.device_name)

            # On a successful read, switch the status to Run
            try:
                for status in self.last_status:
                    json_msg = status.read_data(self.addr_array)
                    status.set_status(json_msg is not None)  # a read that returned a payload marks the device alive
                    self.Job_queue.put(
                        [MqttMessage(topic=sendout_topic, payload=json_msg)])
                    _LOGGER.info("MelsecplcWorker --> Report Read PLC Data: " +
                                 json_msg)
                    self.Status = "Run"
            except Exception as ee:
                logger.log_exception(
                    _LOGGER,
                    'MelsecplcWorker --> Read Data Excepting. Msg : %s',
                    str(ee))
                self.Status = "Down"
                # Read Data Error Send Error to MQTT

        else:
            _LOGGER.info(
                "MelsecplcWorker --> status_update: Waiting for Collect Command!"
            )

    def read_payload_cmd_start(self, device_name, payload, topic):

        try:
            cmd_start = {}
            cmd_start = json.loads(payload)

            _LOGGER.info(
                "MelsecplcWorker --> read_payload_cmd_start: payload = " +
                payload)
            _LOGGER.info(
                "MelsecplcWorker --> read_payload_cmd_start: cmd_start = " +
                str(cmd_start))
            _LOGGER.info(
                "MelsecplcWorker --> read_payload_cmd_start: cmd_start['Device_Info] = "
                + str(cmd_start['Device_Info']))
            sendout_topic = topic + "/Ack"

            if cmd_start['Cmd_Type'] == "Start":
                connect_status = 'OK'

                self.devices.update({device_name: cmd_start['Device_Info']})

                # Connection setup complete
                if not self.start_to_collect:
                    self.create_devices()

                for status in self.last_status:
                    if status.p_device is None:
                        connect_status = 'NG'

                ret_json = {}
                ret_json.update({'Cmd_Result': connect_status})
                ret_json.update({'Trace_ID': cmd_start['Trace_ID']})
                json_msg = json.dumps(ret_json)
                self.Job_queue.put(
                    [MqttMessage(topic=sendout_topic, payload=json_msg)])

                if connect_status == 'NG':
                    self.Status = "Down"
                    self.report_alarm('1001', 'ERROR', 'Connecting to PLC failed')
                else:
                    self.Status = "Ready"
                    self.report_alarm('0001', 'INFO',
                                      'Connected to PLC successfully')

            else:
                ret_json = {}
                ret_json.update({'Cmd_Result': "NG"})
                ret_json.update({'Trace_ID': cmd_start['Trace_ID']})
                json_msg = json.dumps(ret_json)
                self.Job_queue.put(
                    [MqttMessage(topic=sendout_topic, payload=json_msg)])
                # Also send an alarm to MQTT
                self.report_alarm('1002', 'ERROR', 'Cmd_Type not Start')

        except Exception as ee:
            logger.log_exception(_LOGGER, 'Cmd Start Exception Error Msg : %s',
                                 str(ee))
            self.Status = "Down"
            sendout_topic = topic + "/Ack"
            ret_json = {}
            ret_json.update({'Cmd_Result': "NG"})
            ret_json.update({'Trace_ID': cmd_start.get('Trace_ID', '')})
            json_msg = json.dumps(ret_json)
            self.Job_queue.put(
                [MqttMessage(topic=sendout_topic, payload=json_msg)])
            # Also send an alarm to MQTT
            self.report_alarm(
                '1003', 'ERROR',
                'Cmd_Type Start happened exception error :' + str(ee))

    def read_payload_cmd_readdata(self, device_name, payload, topic):

        try:
            cmd_read = {}
            cmd_read = json.loads(payload)

            _LOGGER.info(
                "MelsecplcWorker --> read_payload_cmd_readdata: cmd_read = "
                + str(cmd_read))
            sendout_topic = topic + "/Ack"

            if cmd_read['Cmd_Type'] == "Collect":
                self.addr_array = cmd_read['Address_Info']
                interval = int(cmd_read['Report_Interval'])

                self.flag_started = False
                self._scheduler.pause()
                self._scheduler.reschedule_job(job_id=self.ReadData_job_id,
                                               trigger='interval',
                                               seconds=interval)
                self._scheduler.resume()
                self.flag_started = True

                ret_json = {}
                ret_json.update({'Cmd_Result': "OK"})
                ret_json.update({'Trace_ID': cmd_read['Trace_ID']})
                json_msg = json.dumps(ret_json)
                self.Job_queue.put(
                    [MqttMessage(topic=sendout_topic, payload=json_msg)])

            else:
                ret_json = {}
                ret_json.update({'Cmd_Result': "NG"})
                ret_json.update({'Trace_ID': cmd_read['Trace_ID']})
                json_msg = json.dumps(ret_json)
                self.Job_queue.put(
                    [MqttMessage(topic=sendout_topic, payload=json_msg)])
                _LOGGER.error(
                    "MelsecplcWorker --> read_payload_cmd_readdata: Cmd_Type not Collect, Cmd_Type = "
                    + str(cmd_read['Cmd_Type']))
                # Also send an alarm to the host
                self.report_alarm('1004', 'ERROR', 'Type not Collect')

        except Exception as ee:
            logger.log_exception(_LOGGER,
                                 'Cmd ReadData Exception Error Msg : %s',
                                 str(ee))
            sendout_topic = topic + "/Ack"
            ret_json = {}
            ret_json.update({'Cmd_Result': "NG"})
            ret_json.update({'Trace_ID': cmd_read.get('Trace_ID', '')})
            json_msg = json.dumps(ret_json)
            self.Job_queue.put(
                [MqttMessage(topic=sendout_topic, payload=json_msg)])
            self.Status = "Down"
            self.report_alarm(
                '1005', 'ERROR',
                'Cmd_Type Collect happened exception error :' + str(ee))

    def read_payload_parameter_request(self, device_name, payload):
        parameter_request = {}
        parameter_request = json.loads(payload)
        _LOGGER.info(
            "MelsecplcWorker --> read_payload_parameter_request: parameter_request = "
            + str(parameter_request))

    def cmd_stop(self, value):
        if value == KILL_ME:
            sys.exit("END")
        elif value == STOP_COLLECT:
            self._scheduler.pause()
            for status in self.last_status:
                self.set_stop_flag()
                self.last_status = None

    def status_update(self):

        _LOGGER.info("MelsecplcWorker --> Heartbit Report")
        now = datetime.datetime.now()
        sendout_topic = self.HeartBeat_Topic.replace(
            "{gateway}", self.gateway_id).replace("{device}", self.device_name)

        HB_json = {}
        HB_json.update({'Version': self.Version})
        HB_json.update({'Status': self.Status})
        HB_json.update({'HBDatetime': now.strftime("%Y%m%d%H%M%S%f")[:-3]})
        json_msg = json.dumps(HB_json)
        _LOGGER.debug("Heartbit Report : " + json_msg)

        ret = []
        messages = []
        messages.append(MqttMessage(topic=sendout_topic, payload=json_msg))

        ret += messages

        return ret

    def report_alarm(self, code, level, desc):

        now = datetime.datetime.now()
        report_alarm_Topic = self.Alarm_Topic.replace(
            "{gateway}", self.gateway_id).replace("{device}", self.device_name)

        Alarm_json = {}
        Alarm_json.update({'AlarmCode': code})
        Alarm_json.update({'AlarmLevel': level})
        Alarm_json.update({'AlarmApp': 'WORKER'})
        Alarm_json.update({'DateTime': now.strftime("%Y%m%d%H%M%S%f")[:-3]})
        Alarm_json.update({'AlarmDesc': desc})
        json_msg = json.dumps(Alarm_json)

        self.Job_queue.put(
            [MqttMessage(topic=report_alarm_Topic, payload=json_msg)])
        _LOGGER.error("MelsecplcWorker --> Report Alarm to Host Msg : " +
                      str(json_msg))

    def on_command(self, topic, value):

        value = value.decode('utf-8')

        # Configuration is handled elsewhere
        _, _, gateway_id, device_name, cmd_type, cmd = topic.split('/')

        if cmd_type == "Cmd":

            _LOGGER.info("MelsecplcWorker --> on_command: topic = " + topic)
            _LOGGER.info("MelsecplcWorker --> on_command: gateway_id = " +
                         gateway_id)
            _LOGGER.info("MelsecplcWorker --> on_command: device_name = " +
                         device_name)
            _LOGGER.info("MelsecplcWorker --> on_command: cmd_type = " +
                         cmd_type)
            _LOGGER.info("MelsecplcWorker --> on_command: cmd = " + cmd)

            if cmd == "Start":
                self.read_payload_cmd_start(device_name, value, topic)
            elif cmd == "ReadData":
                self.read_payload_cmd_readdata(device_name, value, topic)
            elif cmd == "Stop":
                self.cmd_stop(value)
                _LOGGER.error("MelsecplcWorker --> on_command = " + cmd +
                              "Stop Collect Data")
                self.Status = "Idle"

            elif cmd == "OTA":
                cmd_OTA = json.loads(value)
                if cmd_OTA["Cmd"] == "OTA":
                    OTA_json = {}
                    OTA_json.update({'Trace_ID': cmd_OTA["Trace_ID"]})
                    OTA_json.update({'Version': self.Version})
                    OTA_json.update({'Status': self.Status})
                    OTA_json.update(
                        {'Datetime': now.strftime("%Y%m%d%H%M%S%f")[:-3]})
                    OTA_json.update({'ProcessID': os.getpid()})
                    json_msg = json.dumps(OTA_json)
                    sendout_topic = topic + "/Ack"
                    self.Job_queue.put(
                        [MqttMessage(topic=sendout_topic, payload=json_msg)])
                    time.sleep(5)
                    self.cmd_stop("kill")

        elif cmd_type == "Parameter":
            if cmd == "Request":
                self.read_payload_parameter_request(device_name, value)
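
The interval change in read_payload_cmd_readdata above boils down to the following APScheduler pattern; a standalone sketch with illustrative names ('poll', 'poll_job'):

import time
from apscheduler.schedulers.background import BackgroundScheduler

def poll():
    print('polling PLC ...')

scheduler = BackgroundScheduler()
scheduler.add_job(poll, 'interval', seconds=10, id='poll_job')
scheduler.start()

# Change the polling interval at runtime without losing the job; pausing
# first mirrors the worker above, although reschedule_job alone is safe.
scheduler.pause()
scheduler.reschedule_job(job_id='poll_job', trigger='interval', seconds=2)
scheduler.resume()

time.sleep(6)
scheduler.shutdown()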
Esempio n. 31
0
"""
Demonstrates how to use the blocking scheduler to schedule a job that executes on 3 second
intervals.
"""

from datetime import datetime
import os
import time

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.background import BackgroundScheduler



def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    #scheduler = BlockingScheduler()
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=3, id='test')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()
        scheduler.reschedule_job('test', trigger='cron', minute='*/1')
        time.sleep(1000)
    except (KeyboardInterrupt, SystemExit):
        pass
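
The BlockingScheduler import above is otherwise unused; with it, start() blocks the calling thread, so no sleep loop is needed, but any rescheduling must then happen from inside a job or another thread. A minimal sketch:

from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    print('Tick! The time is: %s' % datetime.now())

scheduler = BlockingScheduler()
scheduler.add_job(tick, 'interval', seconds=3, id='test')
try:
    scheduler.start()  # blocks until the scheduler is shut down
except (KeyboardInterrupt, SystemExit):
    pass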
Esempio n. 32
0
class Planner:
    def __init__(self, db_url):
        job_defaults = dict(misfire_grace_time=180,
                            coalesce=True,
                            max_instances=1,
                            next_run_time=None)
        self.scheduler = BackgroundScheduler(timezone='UTC',
                                             job_defaults=job_defaults)
        self.scheduler.add_jobstore('sqlalchemy',
                                    url=db_url + '?application_name=aps')
        self.scheduler.start()
        log.info('started planner scheduler')

        for idx, j in enumerate(self.get_jobs()):
            log.info(
                '%d. name:%s trigger:%s func:%s args:%s kwargs:%s next:%s',
                idx + 1, j['name'], j['trigger'], j['func'], j['args'],
                j['kwargs'], j['next_run_time'])

    def _job_to_dict(self, job):
        if isinstance(job.trigger, IntervalTrigger):
            trigger = 'interval'
        elif isinstance(job.trigger, DateTrigger):
            trigger = 'date'
        elif isinstance(job.trigger, CronTrigger):
            trigger = 'cron'
        else:
            trigger = 'unknown'
        next_run_time = ""
        if job.next_run_time:
            next_run_time = job.next_run_time.strftime("%Y-%m-%dT%H:%M:%SZ")
        return dict(id=job.id,
                    name=job.name,
                    func=job.func_ref,
                    args=job.args,
                    kwargs=job.kwargs,
                    trigger=trigger,
                    next_run_time=next_run_time)

    def add_job(
            self,
            func=None,
            trigger=None,
            args=None,
            kwargs=None,
            job_id=None,
            name=None,
            misfire_grace_time=None,  # pylint: disable=too-many-arguments
            coalesce=True,
            max_instances=None,
            next_run_time=None,
            replace_existing=False,
            trigger_args=None):
        if trigger_args is None:
            trigger_args = {}

        all_kw_args = dict(args=args,
                           kwargs=kwargs,
                           id=job_id,
                           name=name,
                           replace_existing=replace_existing)

        if misfire_grace_time is not None:
            all_kw_args['misfire_grace_time'] = misfire_grace_time
        if coalesce is not None:
            all_kw_args['coalesce'] = coalesce
        if max_instances is not None:
            all_kw_args['max_instances'] = max_instances
        if next_run_time is not None:
            all_kw_args['next_run_time'] = next_run_time

        if trigger in ['interval', 'repo_interval']:
            trigger = IntervalTrigger(**trigger_args)
        elif trigger == 'date':
            trigger = DateTrigger(**trigger_args)
        elif trigger == 'cron':
            trigger = CronTrigger(**trigger_args)
        else:
            raise Exception('unknown trigger type %s' % trigger)

        all_kw_args['trigger'] = trigger
        log.info('add_job args: %s', all_kw_args)

        try:
            job = self.scheduler.add_job(func, **all_kw_args)
            log.info('add_job job:%s', job)
        except Exception:
            log.exception('some problem')
            raise
        return self._job_to_dict(job)

    def get_jobs(self):
        try:
            jobs = []
            for j in self.scheduler.get_jobs():
                jobs.append(self._job_to_dict(j))
        except Exception:
            log.exception('some problem')
            raise
        return jobs

    def reschedule_job(self, job_id=None, trigger=None, trigger_args=None):
        if trigger_args is None:
            trigger_args = {}

        log.info('reschedule_job args: %s %s %s', job_id, trigger,
                 trigger_args)

        try:
            job = self.scheduler.reschedule_job(job_id,
                                                trigger=trigger,
                                                **trigger_args)
            log.info('reschedule_job job:%s', job)
        except Exception:
            log.exception('some problem')
            raise
        return self._job_to_dict(job)

    def remove_job(self, job_id=None):
        log.info('remove_job arg: %s', job_id)

        try:
            self.scheduler.remove_job(job_id)
            log.info('remove_job done')
        except Exception:
            log.exception('some problem')
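
A usage sketch for the Planner above. The database URL, textual function reference, and job id are illustrative assumptions, not part of the original code:

planner = Planner('postgresql://user:secret@localhost/jobs')

# Textual "module:function" references are resolvable by the SQLAlchemy
# jobstore, so the job survives restarts.
job = planner.add_job(func='mypackage.tasks:cleanup',
                      trigger='interval',
                      job_id='cleanup',
                      name='cleanup',
                      replace_existing=True,
                      trigger_args={'hours': 1})
print(job['trigger'], job['next_run_time'])

# Move the same job onto a daily cron schedule at 03:00 UTC.
planner.reschedule_job(job_id='cleanup', trigger='cron',
                       trigger_args={'hour': 3, 'minute': 0})

for j in planner.get_jobs():
    print(j['id'], j['trigger'], j['next_run_time'])

planner.remove_job(job_id='cleanup')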
Esempio n. 33
0
class Scheduler:
    TRIGGERS = {
        "trig_5minutes": {
            "id": "trig_5minutes",
            "name": "Every five minutes",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(minute="*/5"),
            "from_trigger": lambda trig: []
        },
        "trig_hourly": {
            "id": "trig_hourly",
            "name": "Each hour",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(hour="*"),
            "from_trigger": lambda trig: []
        },
        "trig_daily": {
            "id": "trig_daily",
            "name": "Each day",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(day="*"),
            "from_trigger": lambda trig: []
        },
        "trig_weekday": {
            "id":
            "trig_weekday",
            "name":
            "Each weekday",
            "options": [{
                "id": i,
                "name": el,
                "active": True
            } for i, el in enumerate("Mon Tue Wed Thu Fri Sat Sun".split())],
            "schema": {
                "id": {
                    "type": "integer",
                    "coerce": int,
                    "min": 0,
                    "max": 6
                },
                "name": {
                    "type": "string",
                    "required": False
                },
                "active": {
                    "type": "boolean",
                    "coerce": utility.coerce_bool,
                    "required": True
                }
            },
            "trigger_args":
            lambda args: dict(day_of_week=",".join(str(a) for a in args)),
            "from_trigger":
            lambda trig: [int(d) for d in str(trig.fields[4]).split(",")]
        },
        "trig_monthly": {
            "id":
            "trig_monthly",
            "name":
            "Each month",
            "options": [{
                "id": i + 1,
                "name": el,
                "active": True
            } for i, el in enumerate(("Jan Feb Mar Apr May Jun "
                                      "Jul Aug Sep Oct Nov Dec").split())],
            "schema": {
                "id": {
                    "type": "integer",
                    "coerce": int,
                    "min": 0,
                    "max": 12
                },
                "name": {
                    "type": "string",
                    "required": False
                },
                "active": {
                    "type": "boolean",
                    "coerce": utility.coerce_bool,
                    "required": True
                }
            },
            "trigger_args":
            lambda args: dict(month=",".join(str(a) for a in args)),
            "from_trigger":
            lambda trig: [int(d) for d in str(trig.fields[1]).split(",")]
        },
    }
    """Predefined triggers and their argument checks."""
    def __init__(self,
                 elastic,
                 crawler_dir="crawlers",
                 crawler_args={},
                 **cron_defaults):
        """Initializes the scheduler by binding it to it's elasticsearch db.

        Args:
            elastic (elasticsearch.Elasticsearch): the es-client to save the
                crawling jobs in.
            crawler_dir (str): the directory where the crawlers will be found.
                Defaults to "crawlers".
            crawler_args (dict): keyword arguments handed to the job store for
                instantiating the crawlers.
            **cron_defaults (dict): default keyword arguments for the cron
                triggers.

        Returns:
            Scheduler: a fresh Scheduler instance.
        """
        jobstores = {
            "default": {
                "type": "memory"
            },
            "elastic": InjectorJobStore(kwargs=crawler_args, client=elastic)
        }

        executors = {
            "default": ThreadPoolExecutor(10),
            "processpool": ProcessPoolExecutor(10)
        }

        job_defaults = {
            "misfire_grace_time": 5 * 60,  # 5min
            "coalesce": True,
        }

        self.cron_defaults = utility.DefaultDict(
            {
                # standard is every day at 00:00:00
                "hour": 0,
                "minute": 0,
                "second": 0
            },
            **cron_defaults)

        self.scheduler = BackgroundScheduler(jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults,
                                             timezone=utc)

        self.crawlers = _detect_crawlers()
        # set up the validator schema.
        self.job_validator = cerberus.Validator(SCHEMATA["job"]({
            "trigger_ids":
            list(self.TRIGGERS)
        }),
                                                allow_unknown=True)
        self.scheduler.start()

    def upsert_job(self, job_dict, **runtime_args):
        """Adds or updates a job using the provided user_input.

        If an id field is present in the dict, the job is updated, otherwise
        a new one is created.

        Args:
            job_dict (dict): user input for a job, as defined in `SCHEMATA`.
            **runtime_args (dict): additional runtime arguments for the
                crawler.

        Returns:
            apscheduler.job.Job: a new job Object.
        """
        if not self.job_validator.validate(job_dict):
            raise AssertionError(str(self.job_validator.errors))

        doc = utility.SDA(job_dict)

        job = self.crawlers.get(doc["crawler.id"], None)
        # default to the SearchPlugin, and give the search name as argument.
        if job is None:
            inst = {
                "args": ("SearchPlugin", runtime_args),
                "kwargs": dict(search_id=doc["crawler.id"])
            }
        else:
            inst = {"args": (doc["crawler.id"], runtime_args), "kwargs": {}}
        trigger = self._make_trigger(doc["schedule"])

        if doc["id"]:
            self.scheduler.modify_job(doc["id"],
                                      jobstore="elastic",
                                      func=_run_plugin,
                                      name=doc["name.name"],
                                      **inst)
            new_job = self.scheduler.reschedule_job(doc["id"],
                                                    jobstore="elastic",
                                                    trigger=trigger)
        else:
            # use the crawler id as name, when the job is created.
            new_job = self.scheduler.add_job(_run_plugin,
                                             jobstore="elastic",
                                             trigger=trigger,
                                             name=doc["crawler.id"],
                                             **inst)

        return new_job

    def get_triggers(self):
        """Returns a list of triggers, that are predefined in the system.

        Returns:
            list: a list of tuples, holding id and name for each trigger.
        """
        return [{
            "id": v["id"],
            "name": v["name"],
            "options": v["options"]
        } for v in self.TRIGGERS.values()]

    def sync_jobs(self, joblist):
        """Synchronize the current jobs with a given list of jobs.

        This means that all jobs not included in the list will be removed,
        existing ones will be updated, and new ones will be added to the
        scheduler.

        Args:
            joblist (list): a list of jobs in the format of the schema.

        Returns:
            bool: whether this operation was successful or not.
        """
        logger.debug("Syncing job lists ...")
        current_jobs = self.get_jobs()
        jobs_to_keep = {j["id"] for j in joblist if j.get("id")}

        # remove old jobs
        for job in current_jobs:
            if job["id"] not in jobs_to_keep:
                self.scheduler.remove_job(job["id"], jobstore="elastic")

        # update and add jobs
        for job in joblist:
            self.upsert_job(job)

        return True

    def _make_trigger(self, trigger_doc):
        """Creates a trigger from a given dictionary of user input."""
        # we can assume that an id for the trigger is given in the input.
        cur_trigger = self.TRIGGERS[trigger_doc["id"]]
        option_validator = cerberus.Validator(cur_trigger["schema"])

        args = [
            o["id"] for o in trigger_doc["options"]
            if option_validator(o) and o["active"]
        ]

        trigger_args = cur_trigger["trigger_args"](args)
        return CronTrigger(**trigger_args)

    def _serialize_trigger(self, trigger):
        """Serializes a trigger into a json array, as defined in TRIGGERS."""
        # since we only have a defined set of triggers, the following is
        # possible.
        mapping = [(v["trigger_args"]([]).keys(), k)
                   for k, v in self.TRIGGERS.items()]

        trigger_doc = None
        result = {}
        for keys, name in mapping:
            # all keys for the mapping need to be defined.
            def_keys = [f.name for f in trigger.fields if not f.is_default]
            if all([(key in def_keys) for key in keys]):
                trigger_doc = self.TRIGGERS[name]
                break

        if not trigger_doc:
            return result

        result["name"] = trigger_doc["name"]
        result["id"] = trigger_doc["id"]
        args = set(trigger_doc["from_trigger"](trigger))
        # copy the list of options (otherwise this leads to nasty side effects)
        options = [dict(**item) for item in trigger_doc["options"]]
        for option in options:
            option["active"] = option["id"] in args
        result["options"] = options

        return result

    def get_jobs(self):
        """Returns a list of jobs that are scheduled in the system.

        Returns:
            list: a list of job-dicts, holding the id and the runtimes.
        """
        jobs = self.scheduler.get_jobs()
        joblist = []
        for job in jobs:
            joblist.append({
                "id": job.id,
                "name": {
                    "name": job.name
                },
                "crawler": {
                    "id": job.args[0]
                },
                "schedule": self._serialize_trigger(job.trigger),
                "next_run": {
                    "name": job.next_run_time
                }
            })
        logger.debug(f"Retrieved {len(joblist)} jobs from the jobstore.")
        return joblist

    def run_job(self, job_id):
        """Runs the job with the specified id immediately.

        Args:
            job_id: the id of the job that should be run.

        Returns:
            bool: whether running the job succeeded or not.
        """
        logger.debug(f"Running job '{job_id}' directly.")
        cur_job = self.scheduler.get_job(job_id, jobstore="elastic")
        if cur_job is None:
            return False

        cur_job.func(*cur_job.args, **cur_job.kwargs)
        return True
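
Since TRIGGERS is a plain class attribute, the trigger construction and serialization helpers above can be exercised without a live scheduler or Elasticsearch client. A small round-trip sketch, assuming the class above is importable:

from apscheduler.triggers.cron import CronTrigger

definition = Scheduler.TRIGGERS['trig_weekday']

# Build a cron trigger for Mon, Wed and Fri (option ids 0, 2, 4).
trigger = CronTrigger(**definition['trigger_args']([0, 2, 4]))

# fields[4] is CronTrigger's day_of_week field, so from_trigger
# recovers the original option ids.
assert definition['from_trigger'](trigger) == [0, 2, 4]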