예제 #1
0
class Jobs_Scheduler():
    """Cron-style job scheduler built on APScheduler's BackgroundScheduler.

    Each scheduled job's message payload is persisted to a ``<cronjobid>.shed``
    file under ``cfg.SCHEDULED_JOBS_FILES_PATH`` so schedules survive process
    restarts; ``__load_scheduled_jobs`` re-registers them on startup.

    jobid format :- <device_com_pk><hub_pk>
    """

    # Serializes scheduler callbacks so only one job at a time talks to the
    # message-processing queue over ZMQ.
    __lock_sending_to_msg_q = threading.Lock()

    def __init__(self):
        self.__scheduler = None
        self.__init_scheduler()

    def __init_scheduler(self):
        """Create the BackgroundScheduler and re-register persisted jobs."""
        try:
            self.__scheduler = BackgroundScheduler()
            self.__load_scheduled_jobs()
        except Exception as e:
            Logger.log_debug('ERROR @ Job_Scheduler -> __init_scheduler')
            Logger.log_error(str(e))
        finally:
            Logger.log_info('Done with initializing job scheduler service')

    def start_scheduler(self):
        """Start the scheduler's background thread."""
        self.__scheduler.start()

    def stop_scheduler(self):
        """Shut the scheduler down.

        BUGFIX: BackgroundScheduler has no ``stop()`` method; APScheduler 3.x
        tears down via ``shutdown()``.
        """
        self.__scheduler.shutdown()

    def __load_scheduled_jobs(self):
        """Re-register every job whose payload was persisted as a *.shed file."""
        try:
            file_path = cfg.SCHEDULED_JOBS_FILES_PATH

            for file in glob.glob(file_path + "*.shed"):

                dict_msg = CF.load_dictionary_object(file)

                jobid = str(dict_msg["cronjobid"])
                m = str(dict_msg["cronminute"])
                h = dict_msg["cronhour"]
                dow = dict_msg["crondateofweek"]

                # Payload is already on disk, so pass message_dict=None to
                # keep schedule_a_job from rewriting the .shed file.
                self.schedule_a_job(jobid, m, h, dow, None)

        except Exception as e:
            Logger.log_debug("ERROR @ load_scheduled_jobs")
            Logger.log_error(str(e))

    def schedule_a_job(self, cronjobid, m="0", h='*', dow='*', message_dict=None):
        """Register a cron job with the scheduler.

        :param cronjobid: unique job id (<device_com_pk><hub_pk>)
        :param m/h/dow: cron minute / hour / day-of-week fields (strings)
        :param message_dict: payload to persist to disk; None means the
            payload is already persisted (restart path).
        :returns: True on success, False otherwise.
        """
        ret_val = False
        try:
            # BUGFIX: the original compared the string minute to an int
            # (``m >= 0``), which always passed on Python 2 and raises
            # TypeError on Python 3.  Guard against a missing value instead.
            if m is not None:

                ret_val = True
                if message_dict is not None:
                    # Persist the payload so the job survives a restart.
                    file_name = '%s%s%s' % (cfg.SCHEDULED_JOBS_FILES_PATH, cronjobid, ".shed")
                    ret_val = CF.save_dictionary_object(file_name, message_dict)

                if ret_val:
                    Logger.log_info('Adding scheduler job ' + str(cronjobid))
                    self.__scheduler.add_job(
                        Jobs_Scheduler.scheduler_callback_function,
                        minute=m, hour=h, day_of_week=dow, id=cronjobid,
                        trigger="cron", kwargs={"jobid": cronjobid})

            return ret_val
        except Exception as e:
            Logger.log_debug("ERROR @ schedule_a_job")
            Logger.log_error(str(e))
            return False

    def remove_a_scheduled_job(self, cronjobid):
        """Unschedule a job and delete its persisted .shed file.

        :returns: (True, "") on success, (False, error-text) on failure.
        """
        try:
            Logger.log_debug("Removing scheduled job " + cronjobid)
            self.__scheduler.remove_job(cronjobid)
            file_name = cfg.SCHEDULED_JOBS_FILES_PATH + cronjobid + ".shed"
            os.remove(file_name)

            return (True, "")

        except Exception as e:
            Logger.log_debug("ERROR @ remove_a_scheduled_job")
            Logger.log_error(str(e))
            # NOTE(review): stripping every 'u' was presumably meant to drop
            # Python 2 u'' prefixes from the repr, but it also mangles any
            # word containing 'u'.  Kept as-is for wire compatibility --
            # confirm with consumers before changing.
            return (False, str(e).replace('u', ''))

    def print_jobs(self):
        """Dump the scheduler's job list (debug aid)."""
        try:
            self.__scheduler.print_jobs()

        except Exception as e:
            Logger.log_debug("ERROR @ print_jobs")
            Logger.log_error(str(e))

    @staticmethod
    def scheduler_callback_function(jobid):
        """Fired by APScheduler when a cron trigger elapses.

        Loads the persisted payload for *jobid* and forwards it to the
        message-processing queue.  Serialized by __lock_sending_to_msg_q.
        """
        Jobs_Scheduler.__lock_sending_to_msg_q.acquire()
        try:
            jobid = str(jobid)

            Logger.log_info("Scheduler triggered for job " + jobid)
            file_path = cfg.SCHEDULED_JOBS_FILES_PATH
            file_name = file_path + jobid + ".shed"

            dict_msg = CF.load_dictionary_object(file_name)

            if dict_msg is not None:
                # Send the message to MessageProc q
                result_msg = Jobs_Scheduler.send_job_command(dict_msg)

        except Exception as e:
            Logger.log_debug("ERROR @ scheduler_callback_function")
            Logger.log_error(str(e))

        finally:
            Logger.log_debug("End of scheduler_callback_function")
            Jobs_Scheduler.__lock_sending_to_msg_q.release()

    @staticmethod
    def send_job_command(dict_command):
        """Send a scheduled command to the message-processing queue over ZMQ
        and, on reply, push a CRON_EVENT notification to the notification queue.
        """
        try:
            cronjobid = dict_command['cronjobid']
            device_code = dict_command['devicecode']
            com_id = dict_command['comid']
            hubcondeviceid = dict_command['hubcondeviceid']
            comcode = dict_command['comcode']

            # 'S' marks the command as scheduler-originated.
            dict_command.update({'u_o_s': 'S'})

            zc = ZC()
            mp_sock = zc.connect_zmq(cfg.msg_proc_ip, cfg.msg_proc_port, 15000)
            mp_sock.send(json.dumps(dict_command))

            ret_msg = zc.zmq_recv(mp_sock, 15000)
            mp_sock.close()
            del mp_sock

            if ret_msg is not None:

                ret_msg = json.loads(ret_msg)
                # Enrich the reply and forward it to the notification queue.
                hub_sno = CF.get_hub_serial_no()
                ret_msg.update({'hub_sno': hub_sno})
                ret_msg.update({'notification_type': 'CRON_EVENT'})
                ret_msg.update({'msg_type': 'NOTIFICATION'})
                ret_msg.update({'notification_dt': datetime.datetime.now().strftime("%Y%m%d %H%M%S")})
                ret_msg.update({'cronjobid': cronjobid})
                ret_msg.update({'devicecode': device_code})
                ret_msg.update({'hubcondeviceid': hubcondeviceid})
                ret_msg.update({'comcode': comcode})

                Logger.log_debug('Sending ack to notification queue')
                CF.send_to_notifiction_queue(ret_msg)
            else:
                Logger.log_error('No response back from the message processing queue for the scheduled event.')

        except Exception as e:
            Logger.log_debug("ERROR @ Jobs_Scheduler -> send_job_command")
            Logger.log_error(str(e))
    
    
    
예제 #2
0
파일: stampy.py 프로젝트: iranzo/stampython
def main():
    """
    Main code for the bot: loads config, validates the Telegram token,
    initializes plugins, then processes updates in daemon or one-shot mode.
    """

    # Main code
    logger = logging.getLogger(__name__)

    # Set database name in config
    if options.database:
        plugin.config.setconfig(key='database', value=options.database)

    conflogging()

    logger.info(msg="Started execution")

    # Default polling interval between update fetches (seconds).
    if not plugin.config.config(key='sleep'):
        plugin.config.setconfig(key='sleep', value=10)

    # Check if we've the token required to access or exit
    if not plugin.config.config(key='token'):
        if options.token:
            token = options.token
            plugin.config.setconfig(key='token', value=token)
        else:
            msg = "Token required for operation, please check"
            msg += " https://core.telegram.org/bots"
            logger.critical(msg)
            sys.exit(1)

    # Check if we've URL defined on DB or on cli and store
    if not plugin.config.config(key='url'):
        if options.url:
            plugin.config.setconfig(key='url', value=options.url)

    # Check if we've owner defined in DB or on cli and store
    if not plugin.config.config(key='owner'):
        if options.owner:
            plugin.config.setconfig(key='owner', value=options.owner)

    scheduler = BackgroundScheduler()
    scheduler.start()

    # Initialize modules
    for i in plugins.getPlugins():
        plug = plugins.loadPlugin(i)
        logger.debug(msg="Processing plugin initialization: %s" % i["name"])
        plug.init()

    # Check operation mode and call process as required
    if options.daemon or plugin.config.config(key='daemon'):
        plugin.config.setconfig(key='daemon', value=True)
        logger.info(msg="Running in daemon mode")
        while plugin.config.config(key='daemon') == 'True':
            process(getupdates())
            sleep(int(plugin.config.config(key='sleep')))
    else:
        logger.info(msg="Running in one-shoot mode")
        process(getupdates())

    # BUGFIX: BackgroundScheduler has no stop() method; APScheduler 3.x
    # tears down via shutdown().
    scheduler.shutdown()

    logger.info(msg="Stopped execution")
    logging.shutdown()
    sys.exit(0)
예제 #3
0
파일: timer.py 프로젝트: ANTodorov/zuul
class TimerTrigger(BaseTrigger):
    """Zuul trigger that enqueues 'timer' events on cron-style schedules."""
    name = 'timer'
    log = logging.getLogger("zuul.Timer")

    def __init__(self, trigger_config=None, sched=None, connection=None):
        # BUGFIX: the original used a mutable default argument
        # (trigger_config={}), shared across all calls; default to None and
        # substitute a fresh dict instead.
        if trigger_config is None:
            trigger_config = {}
        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
        self.apsched = BackgroundScheduler()
        self.apsched.start()

    def _onTrigger(self, pipeline_name, timespec):
        """APScheduler callback: emit a timer event for every project."""
        for project in self.sched.layout.projects.values():
            event = TriggerEvent()
            event.type = 'timer'
            event.timespec = timespec
            event.forced_pipeline = pipeline_name
            event.project_name = project.name
            self.log.debug("Adding event %s" % event)
            self.sched.addEvent(event)

    def _shutdown(self):
        # BUGFIX: BackgroundScheduler has no stop() method; APScheduler 3.x
        # tears down via shutdown().
        self.apsched.shutdown()

    def getEventFilters(self, trigger_conf):
        """Build EventFilters from the pipeline's trigger configuration."""
        def toList(item):
            # Normalize scalar-or-list config values to a list.
            if not item:
                return []
            if isinstance(item, list):
                return item
            return [item]

        efilters = []
        for trigger in toList(trigger_conf):
            f = EventFilter(trigger=self,
                            types=['timer'],
                            timespecs=toList(trigger['time']))

            efilters.append(f)

        return efilters

    def postConfig(self):
        """Re-register all cron jobs after a layout (re)configuration."""
        for job in self.apsched.get_jobs():
            job.remove()
        for pipeline in self.sched.layout.pipelines.values():
            for ef in pipeline.manager.event_filters:
                if ef.trigger != self:
                    continue
                for timespec in ef.timespecs:
                    # Accept 5-field cron specs, or 6 fields with seconds.
                    parts = timespec.split()
                    if len(parts) < 5 or len(parts) > 6:
                        self.log.error("Unable to parse time value '%s' "
                                       "defined in pipeline %s" %
                                       (timespec, pipeline.name))
                        continue
                    minute, hour, dom, month, dow = parts[:5]
                    if len(parts) > 5:
                        second = parts[5]
                    else:
                        second = None
                    trigger = CronTrigger(day=dom,
                                          day_of_week=dow,
                                          hour=hour,
                                          minute=minute,
                                          second=second)

                    self.apsched.add_job(self._onTrigger,
                                         trigger=trigger,
                                         args=(
                                             pipeline.name,
                                             timespec,
                                         ))
예제 #4
0
파일: timer.py 프로젝트: ANTodorov/zuul
class TimerTrigger(BaseTrigger):
    """Zuul trigger that enqueues 'timer' events on cron-style schedules."""
    name = 'timer'
    log = logging.getLogger("zuul.Timer")

    def __init__(self, trigger_config=None, sched=None, connection=None):
        # BUGFIX: the original used a mutable default argument
        # (trigger_config={}), shared across all calls; default to None and
        # substitute a fresh dict instead.
        if trigger_config is None:
            trigger_config = {}
        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
        self.apsched = BackgroundScheduler()
        self.apsched.start()

    def _onTrigger(self, pipeline_name, timespec):
        """APScheduler callback: emit a timer event for every project."""
        for project in self.sched.layout.projects.values():
            event = TriggerEvent()
            event.type = 'timer'
            event.timespec = timespec
            event.forced_pipeline = pipeline_name
            event.project_name = project.name
            self.log.debug("Adding event %s" % event)
            self.sched.addEvent(event)

    def _shutdown(self):
        # BUGFIX: BackgroundScheduler has no stop() method; APScheduler 3.x
        # tears down via shutdown().
        self.apsched.shutdown()

    def getEventFilters(self, trigger_conf):
        """Build EventFilters from the pipeline's trigger configuration."""
        def toList(item):
            # Normalize scalar-or-list config values to a list.
            if not item:
                return []
            if isinstance(item, list):
                return item
            return [item]

        efilters = []
        for trigger in toList(trigger_conf):
            f = EventFilter(trigger=self,
                            types=['timer'],
                            timespecs=toList(trigger['time']))

            efilters.append(f)

        return efilters

    def postConfig(self):
        """Re-register all cron jobs after a layout (re)configuration."""
        for job in self.apsched.get_jobs():
            job.remove()
        for pipeline in self.sched.layout.pipelines.values():
            for ef in pipeline.manager.event_filters:
                if ef.trigger != self:
                    continue
                for timespec in ef.timespecs:
                    # Accept 5-field cron specs, or 6 fields with seconds.
                    parts = timespec.split()
                    if len(parts) < 5 or len(parts) > 6:
                        self.log.error(
                            "Unable to parse time value '%s' "
                            "defined in pipeline %s" % (
                                timespec,
                                pipeline.name))
                        continue
                    minute, hour, dom, month, dow = parts[:5]
                    if len(parts) > 5:
                        second = parts[5]
                    else:
                        second = None
                    trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour,
                                          minute=minute, second=second)

                    self.apsched.add_job(self._onTrigger, trigger=trigger,
                                         args=(pipeline.name, timespec,))
예제 #5
0
    def exposed_pause_job(self, job_id, jobstore=None):
        # RPyC-exposed wrapper: pause the job so it won't fire until resumed.
        # Delegates to the module-level `scheduler` created under __main__.
        return scheduler.pause_job(job_id, jobstore)

    def exposed_resume_job(self, job_id, jobstore=None):
        # RPyC-exposed wrapper: resume a previously paused job.
        # Delegates to the module-level `scheduler` created under __main__.
        return scheduler.resume_job(job_id, jobstore)

    def exposed_remove_job(self, job_id, jobstore=None):
        # RPyC-exposed wrapper: permanently remove the job from its jobstore.
        # Note: intentionally returns None (remove_job has no return value).
        scheduler.remove_job(job_id, jobstore)

    def exposed_get_job(self, job_id):
        # RPyC-exposed wrapper: fetch a single Job by id (None if absent).
        return scheduler.get_job(job_id)

    def exposed_get_jobs(self, jobstore=None):
        # RPyC-exposed wrapper: list jobs, optionally limited to one jobstore.
        return scheduler.get_jobs(jobstore)


if __name__ == '__main__':
    # Start the scheduler, then serve it over RPyC until interrupted.
    scheduler = BackgroundScheduler()
    # BUGFIX: BackgroundScheduler has no initialize() method; start() is the
    # APScheduler call that begins processing jobs.
    scheduler.start()
    protocol_config = {'allow_public_attrs': True}
    server = ThreadedServer(SchedulerService,
                            port=12345,
                            protocol_config=protocol_config)
    try:
        # BUGFIX: rpyc's ThreadedServer is launched with start(), not
        # initialize(); start() blocks until the server is interrupted.
        server.start()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        # BUGFIX: shutdown(), not stop(), is the APScheduler teardown call.
        scheduler.shutdown()
예제 #6
0
파일: boss.py 프로젝트: zapion/mozMinions
class Boss(object):
    """Loads minion configs from JSON files and schedules each minion's
    collect() to run on a fixed interval via APScheduler.
    """
    default_path = None
    # Kept for backward compatibility with code reading Boss.workers;
    # instances now get their own list in __init__ (see BUGFIX there).
    workers = []

    def __init__(self):
        '''
        local path for load config
        '''
        # BUGFIX: the class-level `workers = []` was shared by every Boss
        # instance; give each instance its own list.
        self.workers = []
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        if self.default_path:
            self.load(self.default_path)

    def load_dir(self, folder):
        # BUGFIX: generator .next() is Python-2-only (this file otherwise
        # uses Python 3 print()); use the builtin next() instead.
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fn in filenames:
            self.load(os.path.join(dirpath, fn))

    def load(self, fp):
        '''
        given a file
        TBI: directory
        '''
        with open(fp) as in_data:
            data = json.load(in_data)
            minion = ShellMinion(**data)
            self.workers.append(minion)
            # Poll the minion every 2 seconds.
            self.scheduler.add_job(minion.collect, 'interval',
                                   name=minion.name+'_'+minion.serial, seconds=2
                                   )

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        for worker in self.workers:
            print(str(worker))

    def remove(self, sn):
        '''
        given an SN, stop running instance if possible
        TODO: remove it from the list
        '''
        self.scheduler.remove_job(sn)

    def remove_advanced(self):
        '''
        TBD
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def stop(self, sn):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BUGFIX: BackgroundScheduler has no stop(job_id); pause_job() matches
        # the documented intent (halt the job without removing its config).
        self.scheduler.pause_job(sn)

    def resume(self, sn):
        # NOTE(review): scheduler.resume_job(sn) would undo stop(); left as a
        # no-op to preserve existing behavior -- confirm before enabling.
        pass

    def __del__(self):
        # Best-effort cleanup when the Boss is garbage-collected.
        self.unload_all()

    def get_config(self):
        # Placeholder: no configuration is currently exported.
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass