Example #1
2
class LightManager(object):
    def __init__(self):
        self.setterZones = [LightSetter(zone.mode, zone.pinout, description=zone.description) for zone in zones]
        self.defaultSetterZone = self.setterZones[0]
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

    def setLights(self, color, zone=None):
        if zone is None:
            self.defaultSetterZone.setLights(color)
        elif int(zone) in range(len(self.setterZones)):
            self.setterZones[int(zone)].setLights(color)
        else:
            raise InvalidZoneException

    def setEvent(self, secondsUntilEvent, color, zone=None):
        eventTime = datetime.now()+timedelta(seconds=secondsUntilEvent)
        eventId = str(hash((eventTime, color)))
        self.scheduler.add_job(self.setLights, args=(color,zone), next_run_time=eventTime, id=eventId)
        return eventId

    def cancelEvent(self, eventId):
        self.scheduler.remove_job(eventId)

    def getZoneInfo(self):
        return {str(i): {'type': zone.mode, 'description': zone.description} for i, zone in enumerate(self.setterZones)}
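
# A minimal usage sketch, assuming `zones`, LightSetter and
# InvalidZoneException are defined as in the original module:
manager = LightManager()
event_id = manager.setEvent(60, 'red', zone='1')  # zone 1 turns red in 60 seconds
manager.cancelEvent(event_id)                     # cancel the pending change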
Example #2
0
    def post(self, action, position=''):
        global scheduler
        self.checkStartup()
        
        if action == 'play':
            runCommand('mpc play ' + position)
            #Settings.set('radio', 'state', 'play')
            
            if scheduler is None:
                scheduler = BackgroundScheduler()
                scheduler.add_job(self.checkStatus, 'interval', seconds=30, id='checkStatus', replace_existing=True)
                scheduler.start()
        elif action == 'stop':
            runCommand('mpc stop')
            #Settings.set('radio', 'state', 'stop')
            
            if scheduler is not None:
                scheduler.remove_job('checkStatus')
                scheduler.shutdown()
                scheduler = None
            return {'playMode': 'stopped'}
        elif action == 'pause':
            runCommand('mpc pause')
        elif action == 'next':
            runCommand('mpc next')
        elif action == 'previous':
            runCommand('mpc prev')
        else:
            return {'playMode': 'invalid'}

        (out, err) = runCommand('mpc status')
        if err:
            return {'error': err}, 500
        return {'playMode': Parser.parsePlayMode(out)}
Example #3
0
class timer:
    '''
    process = timer(function, [para1, para2, ...], interval_seconds, job_id)
    process.run()
    '''
    def __init__(self, func, paras, seconds, id):
        self.func = func
        self.paras = paras
        self.time = seconds
        self.id = id
        self.scheduler = None
        self.setTimer()

    def setTimer(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(self.func, 'interval', args=self.paras, seconds=self.time, id=self.id)

    def add_job(self, func, seconds, id):
        self.scheduler.add_job(func, 'interval', seconds=seconds, id=id)

    def remove_job(self, id):
        self.scheduler.remove_job(id)


    def run(self):
        self.scheduler.start()
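
# A minimal usage sketch following the docstring above; the job function
# and id are placeholders:
def tick():
    print('tick')

process = timer(tick, [], 5, 'tick-job')
process.run()  # starts the BackgroundScheduler; tick() then fires every 5 seconds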
Example #4
0
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.oncescheduler=BlockingScheduler()
        self.scheduler.start()
    def start(self):
        self.oncescheduler.start()
    def addschedule(self, event=None, day_of_week='0-6', hour='11', minute='57', second='0', id='', type='cron', run_date='', args=None):
        if id == '':
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        if type == 'date':
            if run_date == '':
                self.oncescheduler.add_job(event, args=args)
            else:
                self.oncescheduler.add_job(event, 'date', run_date=run_date, args=args)
        elif type == 'back':
            self.oncescheduler.add_job(event, 'cron', day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
        else:
            self.scheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
    def removeschedule(self,id):
        self.scheduler.remove_job(id)
Example #5
    def build(cube_name_list, buildType, start_time=None, end_time=None):
        run_cube_job_id = '1'
        check_cube_job_id = '2'
        scheduler = BackgroundScheduler()
        CubeWorker.job_instance_dict = {}

        for cube_name in cube_name_list:
            CubeWorker.job_instance_dict[cube_name] = None

        CubeWorker.scheduler = scheduler
        CubeWorker.run_cube_job_id = run_cube_job_id
        CubeWorker.check_cube_job_id = check_cube_job_id
        # start the run cube job immediately
        CubeWorker.run_cube_job(buildType, start_time, end_time)

        scheduler.add_job(CubeWorker.run_cube_job, 'interval', seconds=30, id=run_cube_job_id, args=[buildType, start_time, end_time])
        scheduler.add_job(CubeWorker.check_cube_job, 'interval', seconds=30, id=check_cube_job_id)
        scheduler.start()

        while True:
            if CubeWorker.all_finished():
                print "all cube jobs are finished"
                scheduler.remove_job(check_cube_job_id)
                scheduler.remove_job(run_cube_job_id)
                scheduler.shutdown()
                
                status = CubeWorker.get_status()
                print('Build exited with status %s' % status)
                return status == CubeWorkerStatus.SUCCESS

            time.sleep(settings.KYLIN_CHECK_STATUS_INTERVAL)
Example #6
0
def initialize_scheduler():
    """Initialize the task scheduler. This method configures the global
    scheduler, checks the loaded tasks, and ensures they are all scheduled.
    """
    global SCHEDULER

    # If the scheduler is not enabled, clear it and exit. This prevents any
    # unexpected database session issues.
    if not CONF.scheduler.enable:
        if SCHEDULER:
            SCHEDULER.remove_all_jobs()
            SCHEDULER = None
        LOG.info("Scheduler is not enabled.")
        return

    # Use SQLAlchemy as a Job store.
    jobstores = {
        'default': SQLAlchemyJobStore(engine=get_engine())
    }

    # Two executors: The default is for all plugins. The second one is for
    # the scheduler manager, which makes sure this scheduler instance is
    # aware of all of our plugins.
    executors = {
        'default': ThreadPoolExecutor(10),
        'manager': ThreadPoolExecutor(1),
    }

    # Allow executions to coalesce. See https://apscheduler.readthedocs.org/en
    # /latest/userguide.html#missed-job-executions-and-coalescing
    job_defaults = {
        'coalesce': True,
        'max_instances': 1,
        'replace_existing': True
    }

    # This will automatically create the table.
    SCHEDULER = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    job_defaults=job_defaults,
                                    timezone=utc)

    SCHEDULER.start()
    atexit.register(shutdown_scheduler)

    # Make sure we load in the update_scheduler job. If it exists,
    # we remove/update it to make sure any code changes get propagated.
    if SCHEDULER.get_job(SCHEDULE_MANAGER_ID):
        SCHEDULER.remove_job(SCHEDULE_MANAGER_ID)
    SCHEDULER.add_job(
        update_scheduler,
        id=SCHEDULE_MANAGER_ID,
        trigger=IntervalTrigger(minutes=1),
        executor='manager',
        replace_existing=True
    )
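
# A minimal sketch of the shutdown hook registered with atexit above; the
# real shutdown_scheduler is defined elsewhere in the original module, so
# its exact body is an assumption:
def shutdown_scheduler():
    global SCHEDULER
    if SCHEDULER:
        SCHEDULER.shutdown(wait=True)
        SCHEDULER = None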
Example #7
0
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

    def addschedule(self, event, day_of_week="0-7", hour="11", minute="57", second="0", id=""):
        if id == "":
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        self.scheduler.add_job(event, "cron", day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)

    def removeschedule(self, id):
        self.scheduler.remove_job(id)
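
# A minimal usage sketch; the job function and id are placeholders:
def report():
    print('cron job fired')

sc = schedulecontrol()
sc.addschedule(report, day_of_week='0-6', hour='11', minute='57', id='daily-report')
sc.removeschedule('daily-report')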
Example #8
0
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
 

    def addschedule(self, event, day_of_week='0-6', hour='11', minute='57', second='0', id=''):
        if id == '':
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        self.scheduler.add_job(event, 'cron', day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)

    def removeschedule(self, id):
        self.scheduler.remove_job(id)
Example #9
0
class Scheduler(object):

    def __init__(self, bot):
        self.sched = BackgroundScheduler()
        self.sched.start()
        self.bot = bot
        self.job_id = []

    def __del__(self):
        self.shutdown()

    def shutdown(self):
        self.sched.shutdown()

    def kill_scheduler(self, job_id):
        try:
            self.sched.remove_job(job_id)
            if job_id in self.job_id:
                self.job_id.remove(job_id)
        except JobLookupError:
            return

    def scheduler(self, type, job_id, myFunc, *args):
        # Prevent duplicate scheduling of the same job id
        if job_id not in self.job_id:
            if type == 'interval':
                self.sched.add_job(myFunc, type, seconds=10, id=job_id, args=args)
            elif type == 'cron':
                self.sched.add_job(myFunc, type, day_of_week='mon-sat',
                                                    hour='7-20', minute='*/10',
                                                    id=job_id, args=args)
            self.job_id.append(job_id)
Example #10
0
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.oncescheduler = BlockingScheduler()
        self.scheduler.start()

    def start(self):
        self.oncescheduler.start()

    def addschedule(
        self,
        event=None,
        day_of_week="0-7",
        hour="11",
        minute="57",
        second="0",
        id="",
        type="cron",
        run_date="",
        args=None,
    ):
        if id == "":
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        if type == "date":
            if run_date == "":

                self.oncescheduler.add_job(event, args=args)

            else:

                self.oncescheduler.add_job(event, "date", run_date=run_date, args=args)
        else:
            self.scheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)

    def removeschedule(self, id):
        self.scheduler.remove_job(id)
Example #11
class WorkersManager:
    class Command:
        def __init__(self, callback, timeout, args=(), options=dict()):
            self._callback = callback
            self._timeout = timeout
            self._args = args
            self._options = options
            self._source = '{}.{}'.format(
                callback.__self__.__class__.__name__ if hasattr(
                    callback, '__self__') else callback.__module__,
                callback.__name__)

        def execute(self):
            messages = []
            with timeout(
                    self._timeout,
                    exception=TimeoutError(
                        'Execution of command {} timed out after {} seconds'.
                        format(self._source, self._timeout))):
                messages = self._callback(*self._args)

            _LOGGER.debug('Execution result of command %s: %s', self._source,
                          messages)
            return messages

    def __init__(self, config):
        self._mqtt_callbacks = []
        self._update_commands = []
        self._scheduler = BackgroundScheduler(timezone=utc)
        self._daemons = []
        self._config = config
        self._command_timeout = config.get('command_timeout', 35)

    def register_workers(self):
        for (worker_name, worker_config) in self._config['workers'].items():
            #print("WorkerManager --> register_workers : start to import workers")
            module_obj = importlib.import_module("workers.%s" % worker_name)
            klass = getattr(module_obj, "%sWorker" % worker_name.title())

            # if module_obj.REQUIREMENTS is not None:
            #   self._pip_install_helper(module_obj.REQUIREMENTS)

            command_timeout = worker_config.get('command_timeout',
                                                self._command_timeout)

            # constructor
            worker_obj = klass(command_timeout, **worker_config['args'])

            if not hasattr(worker_obj, 'status_update') and not hasattr(
                    worker_obj, 'run'):
                raise RuntimeError(
                    "%s cannot be initialized, it has to define run or status_update method"
                    % worker_name)

            if hasattr(worker_obj, 'status_update'):
                _LOGGER.debug(
                    "Added %s worker with %d seconds interval and a %d seconds timeout",
                    repr(worker_obj), worker_config['update_interval'],
                    worker_obj.command_timeout)
                command = self.Command(worker_obj.status_update,
                                       worker_obj.command_timeout, [])
                self._update_commands.append(command)

                if 'update_interval' in worker_config:
                    job_id = '{}_interval_job'.format(worker_name)
                    interval_job = self._scheduler.add_job(
                        partial(self._queue_command, command),
                        'interval',
                        seconds=worker_config['update_interval'],
                        id=job_id)
                    self._mqtt_callbacks.append(
                        (worker_obj.format_topic('update_interval'),
                         partial(self._update_interval_wrapper, command,
                                 job_id)))
            if hasattr(worker_obj, 'run'):
                _LOGGER.debug("Registered %s as daemon", repr(worker_obj))
                self._daemons.append(worker_obj)

            if 'topic_subscription' in worker_config:
                self._mqtt_callbacks.append(
                    (worker_config['topic_subscription'],
                     partial(self._on_command_wrapper, worker_obj)))

        if 'topic_subscription' in self._config:
            for (callback_name,
                 options) in self._config['topic_subscription'].items():
                self._mqtt_callbacks.append(
                    (options['topic'],
                     lambda client, _, c: self._queue_if_matching_payload(
                         self.Command(getattr(self, callback_name), self.
                                      _command_timeout), c.payload, options[
                                          'payload'])))

        return self

    def start(self, mqtt):
        mqtt.callbacks_subscription(self._mqtt_callbacks)

        self._scheduler.start()
        self.update_all()

        for daemon in self._daemons:
            threading.Thread(target=daemon.run, args=[mqtt],
                             daemon=True).start()

    def _queue_if_matching_payload(self, command, payload, expected_payload):
        if payload.decode('utf-8') == expected_payload:
            self._queue_command(command)

    def update_all(self):
        _LOGGER.debug("Updating all workers")
        for command in self._update_commands:
            self._queue_command(command)

    @staticmethod
    def _queue_command(command):
        _WORKERS_QUEUE.put(command)

    @staticmethod
    def _pip_install_helper(package_names):
        for package in package_names:
            pip_main(['install', '-q', package])
        logger.reset()

    def _update_interval_wrapper(self, command, job_id, client, userdata, c):
        _LOGGER.info("Recieved updated interval for %s with: %s", c.topic,
                     c.payload)
        try:
            new_interval = int(c.payload)
            self._scheduler.remove_job(job_id)
            self._scheduler.add_job(partial(self._queue_command, command),
                                    'interval',
                                    seconds=new_interval,
                                    id=job_id)
        except ValueError:
            logger.log_exception(_LOGGER, 'Ignoring invalid new interval: %s',
                                 c.payload)

    def _on_command_wrapper(self, worker_obj, client, userdata, c):
        _LOGGER.debug("Received command for %s on %s: %s", repr(worker_obj),
                      c.topic, c.payload)
        global_topic_prefix = userdata['global_topic_prefix']
        topic = c.topic[len(global_topic_prefix + '/'
                            ):] if global_topic_prefix is not None else c.topic
        self._queue_command(
            self.Command(worker_obj.on_command, worker_obj.command_timeout,
                         [topic, c.payload]))
Example #12
0
class Competitions:
    def __init__(self, clist_user_name, clist_api_key, mount_point, bot, fallback):
        self.clist_user_name = clist_user_name
        self.clist_api_key = clist_api_key
        self.bot = bot
        self.ong = None
        self.upc = None
        self.mount_point = mount_point
        self.utility = ContestUtility(mount_point)
        self.jobstores = {
            'default': SQLAlchemyJobStore(url='sqlite:///' + mount_point + 'coders1.db')
        }
        self.schedule = BackgroundScheduler(jobstores=self.jobstores)
        self.schedule.start()
        self.conv_handler = ConversationHandler(
            entry_points=[CommandHandler('upcoming', self.upcoming)],
            allow_reentry=True,
            states={
                SCHED: [CallbackQueryHandler(self.remind, pattern=r"^[0-9]*$")]
            },
            fallbacks=[fallback]
        )
        self.conv_handler1 = ConversationHandler(
            entry_points=[CommandHandler('dontRemindMe', self.removeRemind)],
            allow_reentry=True,
            states={
                REMNOTI: [CallbackQueryHandler(self.remnoti, pattern=r'^.*notiplz.*$')]
            },

            fallbacks=[fallback]
        )

    @staticmethod
    def clist_requester(url, payload):
        response = requests.get(url, headers={'Content-Type': 'application/json', 'User-agent': 'Mozilla/5.0'},
                                params=payload, verify=False)
        return response.text

    # COMMAND HANDLER FUNCTION TO SHOW LIST OF ONGOING COMPETITIONS
    @timeouts.wrapper_for_class_methods
    def ongoing(self, bot, update):
        # PARSING JSON
        date1 = update.message.date
        payload = {'limit': '15', 'start__lt': str(date1), 'end__gt': str(date1),
                   'username': self.clist_user_name, 'api_key': self.clist_api_key,
                   'format': 'json', 'order_by': 'end'}
        raw_data = self.clist_requester(url="https://clist.by/api/v1/contest/", payload=payload)
        try:
            json_data = json.loads(raw_data)
            search_results = json_data['objects']
            self.utility.ongoing_sender(update=update, contest_list=search_results)
            self.ong = search_results
        except Exception:
            self.utility.ongoing_sender(update, self.ong)

    @timeouts.wrapper_for_class_methods
    def upcoming(self, bot, update):
        # PARSING JSON
        date1 = update.message.date
        payload = {'limit': '15', 'start__gt': str(date1), 'order_by': 'start',
                   'username': self.clist_user_name,
                   'api_key': self.clist_api_key, 'format': 'json'}
        raw_data = self.clist_requester(url="https://clist.by/api/v1/contest/", payload=payload)
        try:
            json_data = json.loads(raw_data)
            search_results = json_data['objects']
            self.utility.upcoming_sender(update=update, contest_list=search_results)
            self.upc = search_results
        except Exception:
            self.utility.upcoming_sender(update, self.upc)
        return SCHED

    # FUNCTION TO SET REMINDER
    def remind(self, bot, update):
        query = update.callback_query
        msg = query.data
        if str(msg).isdigit():
            msg = int(msg) - 1
            start1 = ContestUtility.time_converter(self.upc[msg]['start'], '-0030')
            dateT = str(self.upc[msg]['start']).replace("T", " ").split(" ")
            start1 = start1.replace("T", " ").split(" ")
            date = dateT[0].split("-")
            date1 = start1[0].split("-")
            time1 = start1[1].split(":")

            cur_time = datetime.now()

            if not cur_time >= datetime(int(date[0]), int(date[1]), int(date[2]), 0, 0):
                self.schedule.add_job(self.remindmsgDay, 'cron', year=date[0], month=date[1], day=date[2], replace_existing=True,
                                 id=str(query.message.chat_id) + str(self.upc[msg]['id']) + "0",
                                 args=[str(query.message.chat_id),
                                       str(self.upc[msg]['event']) + "\n" + str(self.upc[msg]['href'])])
            if not cur_time >= datetime(int(date1[0]), int(date1[1]), int(date1[2]), int(time1[0]), int(time1[1])):
                self.schedule.add_job(self.remindmsg, 'cron', year=date1[0], month=date1[1], day=date1[2], hour=time1[0],
                                 minute=time1[1],
                                 replace_existing=True,
                                 id=str(query.message.chat_id) + str(self.upc[msg]['id']) + "1",
                                 args=[str(query.message.chat_id),
                                       str(self.upc[msg]['event'] + "\n" + str(self.upc[msg]['href']))])
                bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id,
                                      text="I will remind you about " + self.upc[msg][
                                          'event'] + "\nYou can use command /dontremindme to cancel reminder")
                if query.message.chat_id < 0:
                    bot.send_message(chat_id=query.message.chat_id,
                                     text="I detected that this is a group. The reminder will be sent to the group. If you want to get reminder personally then use this command in private message")
            else:
                bot.edit_message_text(chat_id=query.message.chat_id, message_id=query.message.message_id,
                                      text="Sorry contest has already started")
        return ConversationHandler.END

    # WHAT HAPPENS WHEN REMINDER IS DEPLOYED
    def remindmsgDay(self, chatId, message):
        self.bot.send_message(chat_id=chatId, text="You have a contest within 24 hours\n " + message)

    def remindmsg(self, chatId, message):
        self.bot.send_message(chat_id=chatId, text="Your contest starts in half an hour\n " + message)

    @timeouts.wrapper_for_class_methods
    def removeRemind(self, bot, update):
        conn = sqlite3.connect(self.mount_point + 'coders1.db')
        c = conn.cursor()
        c.execute("SELECT id FROM apscheduler_jobs WHERE id LIKE  " + "'" + str(
            update.message.chat_id) + "%' AND id LIKE " + "'%1'")
        a = c.fetchall()
        if a:
            keyboard = []
            for row in a:
                s = row[0]
                keyboard.append([InlineKeyboardButton(str(self.schedule.get_job(job_id=s).args[1].split("\n")[0]),
                                                      callback_data=s[:-1] + "notiplz")])
            reply_markup = InlineKeyboardMarkup(keyboard)
            update.message.reply_text("Here are your pending reminders\nSelect the reminder you want to remove",
                                      reply_markup=reply_markup)
            c.close()
            return REMNOTI
        else:
            c.close()
            update.message.reply_text("You have no pending reminders")
            return ConversationHandler.END

    def remnoti(self, bot, update):
        query = update.callback_query
        val = str(query.data).replace("notiplz", "")
        try:
            self.schedule.remove_job(val + "0")
        except Exception:
            pass
        try:
            self.schedule.remove_job(val + "1")
        except Exception:
            pass
        bot.edit_message_text(text="Reminder removed", message_id=query.message.message_id,
                              chat_id=query.message.chat_id)
        return ConversationHandler.END
Example #13
0
class CronManager:
    def __init__(self, use_mongo_db=True):

        self.scheduler = BackgroundScheduler(timezone=shanghai_tz)
        self.scheduler.configure()

        if use_mongo_db:
            self.job_store = MongoDBJobStore(database='apscheduler',
                                             collection='cronTab',
                                             client=db)
            self.scheduler.add_jobstore(self.job_store)
            self.is_replace_existing = True
        else:
            self.is_replace_existing = False

    def add_cron(self, cron_instance):
        if not isinstance(cron_instance, Cron):
            raise TypeError('please add correct cron!')

        if cron_instance.trigger_type == 'interval':
            seconds = cron_instance.trigger_args.get('seconds')
            if not isinstance(seconds,
                              int) and not common.can_convert_to_int(seconds):
                raise TypeError('Please enter a valid time interval!')
            seconds = int(seconds)
            if seconds <= 0:
                raise TypeError('Please enter a time interval greater than 0!')
            job = self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                seconds=seconds,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id(),
                max_instances=5,
                jitter=0)  # Quirk: no args needed when adding the job; the bound method already carries its instance
        elif cron_instance.trigger_type == 'date':
            run_date = cron_instance.trigger_args.get('run_date')
            # TODO: validate the type of run_date
            job = self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                run_date=run_date,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id())  # Quirk: no args needed when adding the job; the bound method already carries its instance
        elif cron_instance.trigger_type == 'cron':
            raise TypeError("trigger_type 'cron' is not supported yet")

        return cron_instance.get_id()

    def start(self, paused=False):
        self.scheduler.start(paused=paused)

    def pause_cron(self, cron_id=None, pause_all=False):
        if pause_all:
            self.scheduler.pause()
        elif cron_id:
            self.scheduler.pause_job(job_id=cron_id)

    def resume_cron(self, cron_id=None, resume_all=False):
        if resume_all:
            self.scheduler.resume()
        elif cron_id:
            self.scheduler.resume_job(job_id=cron_id)

    def del_cron(self, cron_id=None, del_all=False):
        if del_all:
            self.scheduler.remove_all_jobs()
        elif cron_id:
            self.scheduler.remove_job(job_id=cron_id)

    def update_cron(self, cron_id, cron_info):
        if not isinstance(cron_id, str):
            raise TypeError('cron_id must be str')

        if not isinstance(cron_info, dict):
            raise TypeError('cron_info must be dict')

        trigger_type = cron_info.get('triggerType')
        interval = cron_info.get('interval')
        run_date = cron_info.get('runDate')
        test_case_suite_id_list = cron_info.get('testCaseSuiteIdList')
        is_execute_forbiddened_case = cron_info.get('isExecuteForbiddenedCase')
        test_case_id_list = cron_info.get('testCaseIdList')
        test_domain = cron_info.get('testDomain')
        alarm_mail_list = cron_info.get('alarmMailList')
        is_ding_ding_notify = cron_info.get('isDingDingNotify')
        ding_ding_access_token = cron_info.get('dingdingAccessToken')
        ding_ding_notify_strategy = cron_info.get('dingdingNotifyStrategy')
        is_enterprise_wechat_notify = cron_info.get('isEnterpriseWechatNotify')
        enterprise_wechat_access_token = cron_info.get(
            'enterpriseWechatAccessToken')
        enterprise_wechat_notify_strategy = cron_info.get(
            'enterpriseWechatNotifyStrategy')
        cron_name = cron_info.get('name')

        try:
            if trigger_type == 'interval' and int(interval) > 0:
                self.scheduler.modify_job(
                    job_id=cron_id, trigger=IntervalTrigger(seconds=int(interval)))
            elif trigger_type == 'date':
                # TODO: validate the type of run_date
                self.scheduler.modify_job(
                    job_id=cron_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('Failed to update the scheduled task trigger!')
            if run_date:
                cron = Cron(
                    test_case_suite_id_list=test_case_suite_id_list,
                    is_execute_forbiddened_case=is_execute_forbiddened_case,
                    test_domain=test_domain,
                    alarm_mail_list=alarm_mail_list,
                    is_ding_ding_notify=is_ding_ding_notify,
                    ding_ding_access_token=ding_ding_access_token,
                    ding_ding_notify_strategy=ding_ding_notify_strategy,
                    is_enterprise_wechat_notify=is_enterprise_wechat_notify,
                    enterprise_wechat_access_token=
                    enterprise_wechat_access_token,
                    enterprise_wechat_notify_strategy=
                    enterprise_wechat_notify_strategy,
                    trigger_type=trigger_type,  # has no real effect when updating; only changes the displayed field
                    test_case_id_list=test_case_id_list,
                    run_date=run_date,
                    cron_name=cron_name)  # has no real effect when updating; only changes the displayed field
            else:
                cron = Cron(
                    test_case_suite_id_list=test_case_suite_id_list,
                    is_execute_forbiddened_case=is_execute_forbiddened_case,
                    test_domain=test_domain,
                    alarm_mail_list=alarm_mail_list,
                    is_ding_ding_notify=is_ding_ding_notify,
                    ding_ding_access_token=ding_ding_access_token,
                    ding_ding_notify_strategy=ding_ding_notify_strategy,
                    is_enterprise_wechat_notify=is_enterprise_wechat_notify,
                    enterprise_wechat_access_token=
                    enterprise_wechat_access_token,
                    enterprise_wechat_notify_strategy=
                    enterprise_wechat_notify_strategy,
                    trigger_type=trigger_type,  # has no real effect when updating; only changes the displayed field
                    test_case_id_list=test_case_id_list,
                    seconds=interval,  # has no real effect when updating; only changes the displayed field
                    cron_name=cron_name)
            # Quirk: when modifying a job you must update args, not func
            self.scheduler.modify_job(job_id=cron_id,
                                      coalesce=True,
                                      args=[cron])

        except BaseException as e:
            raise TypeError('Failed to update scheduled task: %s' % e)

    def shutdown(self, force_shutdown=False):
        if force_shutdown:
            self.scheduler.shutdown(wait=False)
        else:
            self.scheduler.shutdown(wait=True)

    def get_crons(self):
        return self.scheduler.get_jobs()
Example #14
0
class MainRunner(object):
    workers = {}
    dirpath = "."
    defaultOutputPath = "output"

    def __init__(self, dirpath="."):
        """
        local path for load config
        """
        logger.info("Initialing Main Runner for Fuzzy Testing")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(dirpath)

        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if "json" in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        """
        given a json file, load and create a task run regularly
        """
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data["path"] = fp
            except ValueError as e:
                logger.warning(fp + " loaded failed: " + e.message)
                return None
            interval = 30
            if "interval" in data:
                interval = int(data["interval"])

            if "output" in data:
                if "defaultOutputPath" in data["output"]:
                    self.defaultOutputPath = data["output"]["defaultOutputPath"]
                if "dirpath" in data["output"]:
                    data["output"]["outputPath"] = os.path.join(self.defaultOutputPath, data["output"]["dirpath"])
            else:
                data["output"] = {"outputPath": self.defaultOutputPath}

            if "type" not in data:
                logger.error(
                    "Missing type attribute in \
                                your configuration file [%s]"
                    % fp
                )
                return None

            if fp in self.workers:  # existing runner found
                logger.info("Update exisitng runner [%s]" % fp)
                runner = self.workers[fp]
                runner.update(**data)
                # //memo: Interval can't be modified
                self.scheduler.modify_job(job_id=fp, func=runner.run, name=runner.name)

            else:  # Create new
                logger.info("Create new runner [%s]" % fp)
                module_path = data["type"][: data["type"].rfind(".")]
                object_name = data["type"][data["type"].rfind(".") + 1 :]
                try:
                    runner_module = getattr(importlib.import_module(module_path), object_name)
                except Exception as e:
                    logger.exception(e)
                    return None

                runner = runner_module(**data)
                self.workers[fp] = runner
                self.scheduler.add_job(runner.run, "interval", id=fp, name=runner.name, seconds=interval)
            return runner
        return None

    def list(self):
        """
        to list all configs loaded
        format: [sequence number] [minion name] [config_path] [status]
        """
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        """
        given file path, stop running instance if possible
        """
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        """
        TODO:
        1. remove by start, end
        2. by directory(?)
        """
        pass

    def unload_all(self):
        """
        stop all running instances
        """
        self.scheduler.shutdown()

    def pause(self, fp):
        """
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        """
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        """
        For periodical minions, waking them according to timing
        """
        pass
Example #15
0
class MQ7:
    """Detector de gas MQ7"""

    # sensor load resistance
    LOAD_RESISTANCE = 1

    # digital output pin
    DOutput = Button(25)

    # Ro value of the sensor
    ro = 0.255

    # Base-10 logarithmic curves
    HIDROGENO_CURVA = [1.698970, 0.133539, -0.619674]
    CO_CURVA = [1.698970, 0.220108, -0.790349]

    # gas concentrations in ppm
    hidrogeno_ppm = 0
    co_ppm = 0
    voltaje = 0
    funcionamiento = True

    # Counters
    voltaje_acumulado = 0
    cantidad_muestras = 0

    def __init__(self, convertor=ads1115.ADS1115, pin=PIN, address=ADDRESS):
        '''Starts the periodic gas readings from the MQ7 sensor'''

        # Lets the rest of the program keep running even if the ADC fails
        try:
            i2c = busio.I2C(board.SCL, board.SDA)
            adc = convertor(i2c=i2c, address=address)

            self.channel = AnalogIn(adc, pin)

            self.sched_mq7 = BackgroundScheduler()
            self.sched_mq7.add_job(self.leer_voltaje,
                                   'interval',
                                   seconds=5,
                                   id="sched_mq7")
            self.sched_mq7.start()

        except (ValueError, OSError):
            self.funcionamiento = False

            logging.error('The MQ7 ADC has stopped working.')

    def leer_voltaje(self):
        '''Reads the voltage on a schedule and accumulates it, while
        keeping count of how many read cycles have run.'''

        # Prevents the program from crashing if the ADC stops working in the
        # middle of a voltage reading.
        try:
            self.voltaje_acumulado += self.channel.voltage
            self.cantidad_muestras += 1

        except (ValueError, OSError):
            self.funcionamiento = False
            logging.error('The MQ7 ADC has stopped working.')

            self.sched_mq7.remove_job("sched_mq7")

    def finalizar_ciclo(self):
        '''Calls the functions needed to finish the current reading cycle
        and start a new one'''

        self.calcular_concentracion()

        self.reiniciar_contadores()

    def calcular_concentracion(self):
        '''Performs the calculations needed to obtain the gas
        concentrations in ppm'''

        calculos = Calculos()

        self.voltaje = calculos.calcular_voltaje(self.voltaje_acumulado,
                                                 self.cantidad_muestras)

        rs = calculos.calcular_resistencia(self.voltaje, self.LOAD_RESISTANCE)

        ratio = rs / self.ro

        self.hidrogeno_ppm = calculos.calcular_ppm(ratio, self.HIDROGENO_CURVA)

        self.co_ppm = calculos.calcular_ppm(ratio, self.CO_CURVA)

    def activar_alarma(self):
        '''Sets the alarm variable to True when the digital pin
        sends a signal.'''

        global alarma_mq7
        alarma_mq7 = 1

    def reiniciar_contadores(self):
        '''Resets the counter values to take new readings.'''

        global alarma_mq7

        self.voltaje_acumulado = 0
        self.cantidad_muestras = 0
        alarma_mq7 = 0

    def get_hidrogeno(self):
        '''Returns the hidrogeno_ppm value'''

        return self.hidrogeno_ppm

    def get_co(self):
        '''Returns the co_ppm value'''

        return self.co_ppm

    def get_alarma(self):
        '''Returns the alarm variable as a string'''

        global alarma_mq7

        return str(alarma_mq7)

    def get_voltaje(self):
        '''Returns the voltage variable as a string'''

        return str(self.voltaje)

    def get_funcionamiento(self):
        '''Reports via a boolean whether the sensor is working; True
        means the sensor is working'''

        return self.funcionamiento

    # when the sensor detects a dangerous concentration, it raises the alarm
    DOutput.when_pressed = activar_alarma
Example #16
0
class MycroftRoutineSkill(MycroftSkill):
    def __init__(self):
        super(MycroftRoutineSkill, self).__init__(name="MycroftRoutineSkill")
        self._in_progress_tasks = dict()
        self._in_progress_tasks_lock = Lock()

    def initialize(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

        self._routines = defaultdict(dict)
        self._routines.update(self._load_routine_data())

        self._routine_to_sched_id_map = {}
        self._register_routines()

        path = dirname(abspath(__file__))

        path_to_stop_words = join(path, 'vocab', self.lang, 'ThatsAll.voc')
        self._stop_words = self._lines_from_path(path_to_stop_words)

        path_to_cancel_words = join(path, 'vocab', self.lang, 'Cancel.voc')
        self._cancel_words = self._lines_from_path(path_to_cancel_words)

        path_to_days_of_week = join(path, 'vocab', self.lang, 'DaysOfWeek.voc')
        self._days_of_week = self._lines_from_path(path_to_days_of_week)

        self.add_event("mycroft.skill.handler.complete",
                       self._handle_completed_event)

    def _handle_completed_event(self, message):
        task_id = message.context.get("task_id")
        with self._in_progress_tasks_lock:
            if task_id not in self._in_progress_tasks:
                return
            LOGGER.info(task_id + " completed.")
            self._in_progress_tasks[task_id].mark_finished()

    def _await_completion_of_task(self, task_id):
        LOGGER.info("Waiting for " + task_id)
        start = time()
        while start + TIMEOUT_IN_SECONDS > time():
            with self._in_progress_tasks_lock:
                try:
                    if self._in_progress_tasks[task_id].is_done():
                        del (self._in_progress_tasks[task_id])
                        return
                except KeyError:
                    sleep(0.1)
        LOGGER.warn("Timed out wating for {task}".format(task=task_id))
        del (self._in_progress_tasks[task_id])

    def send_message(self, message: str):
        task_id = "{name}.{uuid}".format(name=self.name, uuid=uuid())
        with self._in_progress_tasks_lock:
            self._in_progress_tasks[task_id] = _Task(task_id)
        self.bus.emit(
            Message(msg_type="recognizer_loop:utterance",
                    data={"utterances": [message]},
                    context={"task_id": task_id}))
        return task_id

    def _lines_from_path(self, path):
        with open(path, 'r') as file:
            lines = [line.strip().lower() for line in file]
            return lines

    def _load_routine_data(self):
        try:
            with self.file_system.open(ROUTINES_FILENAME, 'r') as conf_file:
                return json.loads(conf_file.read())
        except FileNotFoundError:
            log_message = "Routines file not found."
        except PermissionError:
            log_message = "Permission denied when reading routines file."
        except json.decoder.JSONDecodeError:
            log_message = "Error decoding json from routines file."
        log_message += " Initializing empty dictionary."
        LOGGER.warning(log_message)
        return {}

    def _register_routines(self):
        for routine in self._routines:
            self._register_routine(routine)

    def _register_routine(self, name):
        self.register_vocabulary(name, "RoutineName")
        schedule = self._routines[name].get('schedule')
        if schedule and self._routines[name].get('enabled', True):
            self._schedule_routine(name, schedule)

    def _schedule_routine(self, name, cronstring):
        trigger = CronTrigger.from_crontab(cronstring)
        job = self.scheduler.add_job(func=self._run_routine,
                                     args=[name],
                                     trigger=trigger,
                                     name=name)
        self._routine_to_sched_id_map[name] = job.id

    def _write_routine_data(self):
        with self.file_system.open(ROUTINES_FILENAME, 'w') as conf_file:
            conf_file.write(json.dumps(self._routines, indent=4))

    @intent_handler(
        IntentBuilder("CreateRoutine").require("Create").require("Routine"))
    def _create_routine(self, message):
        name = self.get_response("name.it")
        if not name:
            return
        name = name.lower()
        if name in self._cancel_words:
            return

        tasks = self._get_task_list()
        if not tasks:
            return

        self._routines[name]['tasks'] = tasks

        self._write_routine_data()
        self._register_routine(name)
        self.speak_dialog('created', data={"name": name})

    def _get_task_list(self):
        first_task = self.get_response("first.task")
        if not first_task:
            return []
        first_task = first_task.lower()
        if first_task in self._cancel_words:
            return []
        tasks = [first_task]
        while True:
            task = self.get_response("next")
            if not task:
                return []
            task = task.lower()
            if task in self._cancel_words:
                return []
            if task in self._stop_words:
                break
            tasks.append(task)
        return tasks

    @intent_handler(
        IntentBuilder("RunRoutine").optionally("Run").require("RoutineName"))
    def _trigger_routine(self, message):
        name = message.data["RoutineName"]
        self._run_routine(name)

    def _run_routine(self, name):
        for task in self._routines[name]['tasks']:
            task_id = self.send_message(task)
            self._await_completion_of_task(task_id)

    @intent_handler(
        IntentBuilder("ListRoutine").require("List").require("Routines"))
    def _list_routines(self, message):
        if not self._routines:
            self.speak_dialog('no.routines')
            return
        routines = ". ".join(self._routines.keys())
        self.speak_dialog('list.routines')
        self.speak(routines)

    @intent_handler(
        IntentBuilder("DeleteRoutine").require("Delete").require("RoutineName")
    )
    def _delete_routine(self, message):
        name = message.data["RoutineName"]
        del (self._routines[name])
        self._write_routine_data()
        self.speak_dialog('deleted', data={"name": name})

    @intent_handler(
        IntentBuilder("DescribeRoutine").require("Describe").require(
            "RoutineName"))
    def _describe_routine(self, message):
        name = message.data["RoutineName"]
        tasks = ". ".join(self._routines[name]['tasks'])
        self.speak_dialog('describe', data={"name": name})
        self.speak(tasks)

    @intent_handler(
        IntentBuilder("ScheduleRoutine").require("Schedule").require(
            "RoutineName"))
    def _add_routine_schedule(self, message):
        name = message.data["RoutineName"]
        days = self._get_days()
        hour, minute = self._get_time()
        cronstring = self._generate_cronstring(days, hour, minute)
        self._routines[name]['schedule'] = cronstring
        self._routines[name]['enabled'] = True
        self._write_routine_data()
        self._schedule_routine(name, cronstring)
        self.speak_dialog("scheduled", data={'name': name})

    @intent_handler(
        IntentBuilder("DisableRoutine").require("Disable").require(
            "RoutineName"))
    def _disable_scheduled_routine(self, message):
        name = message.data["RoutineName"]
        self._routines[name]['enabled'] = False
        self._write_routine_data()
        self.scheduler.remove_job(self._routine_to_sched_id_map[name])
        self.speak_dialog("disabled", data={"name": name})

    @intent_handler(
        IntentBuilder("EnableRoutine").require("Enable").require("RoutineName")
    )
    def _enable_scheduled_routine(self, message):
        name = message.data["RoutineName"]
        self._routines[name]['enabled'] = True
        self._write_routine_data()
        self._schedule_routine(name, self._routines[name]["schedule"])
        self.speak_dialog("enabled", data={"name": name})

    def _get_days(self):
        days_to_run = []
        days_from_user = self.get_response('which.days')
        if not days_from_user:
            return
        days_from_user = days_from_user.lower()
        for i in range(len(self._days_of_week)):
            if self._days_of_week[i] in days_from_user:
                days_to_run.append(str(i))
        return ','.join(days_to_run)

    def _get_time(self):
        regex = '(?P<hour>[0-9]{1,2})[: ](?P<minute>[0-9]{1,2}) (?P<time_of_day>[ap].?m.?)'
        time_from_user = self.get_response('what.time')
        if not time_from_user:
            return
        time_from_user = time_from_user.lower()
        matches = re.match(regex, time_from_user)

        if not matches:
            self.speak_dialog('could.not.parse.time')
            return

        matches = matches.groupdict()
        hour = int(matches['hour'])
        minute = int(matches['minute'])
        pm = matches['time_of_day'] == 'pm'

        hour = hour % 12
        hour += 12 if pm else 0

        return hour, minute

    def _generate_cronstring(self, days, hour, minute):
        return '{m} {h} * * {d}'.format(m=minute, h=hour, d=days)
Example #17
0
class SchedulerService(rpyc.Service):
    def __init__(self, **config):
        self._scheduler = BackgroundScheduler()
        self._scheduler.configure(**config)
        self._scheduler.start()
        self.logger = logging.getLogger("Heartbeat.core")
        self.logger.info("Heartbeat Core Initalized")

    def on_connect(self, conn):
        # code that runs when a connection is created
        # (to init the service, if needed)
        self.logger.info("----------Begin New Client----------")
        self.logger.info(conn)
        self.logger.info("----------End New Client----------")

    def on_disconnect(self, conn):
        # code that runs after the connection has already closed
        # (to finalize the service, if needed)
        self.logger.info("----------Begin Goodbye Client----------")
        self.logger.info(conn)
        self.logger.info("----------End Goodbye Client----------")

    def exposed_add_job(self, func, *args, **kwargs):
        self.logger.info("----------Begin New Job----------")
        self.logger.info("Function: %s", str(func))
        self.logger.info("*args: %s", str(args))
        self.logger.info("**kwargs: %s", str(dict(kwargs)))
        self.logger.info("----------Eng New Job----------")
        return self._scheduler.add_job(func, *args, **kwargs)

    def exposed_modify_job(self, job_id, jobstore=None, **changes):
        return self._scheduler.modify_job(job_id, jobstore, **changes)

    def exposed_reschedule_job(self,
                               job_id,
                               jobstore=None,
                               trigger=None,
                               **trigger_args):
        return self._scheduler.reschedule_job(job_id, jobstore, trigger,
                                              **trigger_args)

    def exposed_pause_job(self, job_id, jobstore=None):
        return self._scheduler.pause_job(job_id, jobstore)

    def exposed_resume_job(self, job_id, jobstore=None):
        return self._scheduler.resume_job(job_id, jobstore)

    def exposed_remove_job(self, job_id, jobstore=None):
        self._scheduler.remove_job(job_id, jobstore)

    def exposed_get_job(self, job_id, jobstore=None):
        return self._scheduler.get_job(job_id, jobstore=jobstore)

    def exposed_get_jobs(self, jobstore=None):
        results = self._scheduler.get_jobs(jobstore)
        return results

    def exposed_get_tasks(self):
        """Return a list of schedule-able function"""
        tasks = []
        for module_file in os.listdir(
                os.path.join(os.path.dirname(__file__), "task")):
            if module_file == "__init__.py" or module_file[-3:] != ".py":
                continue
            module_name = "server.task.{}".format(module_file[:-3])
            module = importlib.import_module(module_name)
            if not hasattr(module, "__all__"):
                continue
            for function_name in module.__all__:
                function = getattr(module, function_name)
                if not callable(function):
                    continue
                parameters = inspect.signature(function).parameters
                parameters_str = ", ".join(
                    [str(val) for key, val in parameters.items()])
                tasks.append("{}:{}({})".format(module_name, function_name,
                                                parameters_str))
        return tasks
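
# A minimal client-side sketch for this rpyc service; the host, port and
# textual task reference are assumptions, not part of the original:
import rpyc

conn = rpyc.connect('localhost', 12345)
job = conn.root.add_job('server.task.demo:run', 'interval', seconds=30)
conn.root.pause_job(job.id)
conn.root.resume_job(job.id)
conn.root.remove_job(job.id)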
Example #18
0
class Scheduler():
    jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')}

    executors = {'default': ThreadPoolExecutor(1)}

    job_defaults = {'coalesce': True, 'max_instances': 1}

    periods = {
        's': 'second',
        'm': 'minute',
        'h': 'hour',
        'd': 'day',
        'w': 'week',
        'M': 'month'
    }

    def __init__(self):
        self._apscheduler = BackgroundScheduler(jobstores=self.jobstores,
                                                executors=self.executors,
                                                job_defaults=self.job_defaults)

    def parse_frequency(self, freq):
        freq_regex = re.compile(r"(\d{1,3})([smhdwM])")
        match = freq_regex.match(freq)

        if match is None:
            raise FrequencyException('time frequency not supported')

        num = match.group(1)
        period = match.group(2)

        return int(num), period

    def get_cron_kwarg(self, num, period):
        return {self.periods[period]: '*/%d' % num}

    def add_job(self, func, freq, job_id, args=None):
        num, period = self.parse_frequency(freq)
        cron_kwarg = self.get_cron_kwarg(num, period)

        self._apscheduler.add_job(func,
                                  'cron',
                                  id=job_id,
                                  args=args,
                                  **cron_kwarg)

    def remove_job(self, job_id):
        self._apscheduler.remove_job(job_id)

    def job_exists(self, job_id):
        jobs = self._apscheduler.get_jobs()
        return any([job.id == job_id for job in jobs])

    def start(self):
        try:
            self._apscheduler.start()
        except SchedulerAlreadyRunningError as e:
            log.error(str(e))

    def pause(self):
        self.shutdown()

    def shutdown(self):
        try:
            self._apscheduler.shutdown()
        except SchedulerNotRunningError as e:
            log.error(str(e))
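
# A minimal usage sketch: '5m' parses to a cron trigger firing every five
# minutes (minute='*/5'); the job function and id are placeholders:
def refresh():
    print('refreshing')

s = Scheduler()
s.add_job(refresh, '5m', 'refresh-job')
s.start()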
Example #19
0
class Scheduler(Flask):

    days = {
        "0": "sun",
        "1": "mon",
        "2": "tue",
        "3": "wed",
        "4": "thu",
        "5": "fri",
        "6": "sat",
        "7": "sun",
        "*": "*",
    }

    seconds = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}

    def __init__(self):
        super().__init__(__name__)
        with open(Path.cwd().parent / "setup" / "scheduler.json", "r") as file:
            self.settings = load(file)
        dictConfig(self.settings["logging"])
        self.configure_scheduler()
        self.register_routes()

    @staticmethod
    def aps_date(date):
        if not date:
            return
        date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
        return datetime.strftime(date, "%Y-%m-%d %H:%M:%S")

    def configure_scheduler(self):
        self.scheduler = BackgroundScheduler(self.settings["config"])
        self.scheduler.start()

    def register_routes(self):
        @self.route("/delete_job/<job_id>", methods=["POST"])
        def delete_job(job_id):
            if self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)
            return jsonify(True)

        @self.route("/next_runtime/<task_id>")
        def next_runtime(task_id):
            job = self.scheduler.get_job(task_id)
            if job and job.next_run_time:
                return jsonify(job.next_run_time.strftime("%Y-%m-%d %H:%M:%S"))
            return jsonify("Not Scheduled")

        @self.route("/schedule", methods=["POST"])
        def schedule():
            if request.json["mode"] in ("resume", "schedule"):
                result = self.schedule_task(request.json["task"])
                if not result:
                    return jsonify({"alert": "Cannot schedule in the past."})
                else:
                    return jsonify({
                        "response": "Task resumed.",
                        "active": True
                    })
            else:
                try:
                    self.scheduler.pause_job(request.json["task"]["id"])
                    return jsonify({"response": "Task paused."})
                except JobLookupError:
                    return jsonify(
                        {"alert": "There is no such job scheduled."})

        @self.route("/time_left/<task_id>")
        def time_left(task_id):
            job = self.scheduler.get_job(task_id)
            if job and job.next_run_time:
                delta = job.next_run_time.replace(tzinfo=None) - datetime.now()
                hours, remainder = divmod(delta.seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                days = f"{delta.days} days, " if delta.days else ""
                return jsonify(f"{days}{hours}h:{minutes}m:{seconds}s")
            return jsonify("Not Scheduled")

    @staticmethod
    def run_service(task_id):
        post(
            f"{environ.get('ENMS_ADDR')}/rest/run_task",
            json=task_id,
            auth=HTTPBasicAuth(environ.get("ENMS_USER"),
                               environ.get("ENMS_PASSWORD")),
            verify=int(environ.get("VERIFY_CERTIFICATE", 1)),
        )

    def schedule_task(self, task):
        if task["scheduling_mode"] == "cron":
            crontab = task["crontab_expression"].split()
            crontab[-1] = ",".join(self.days[day]
                                   for day in crontab[-1].split(","))
            trigger = {"trigger": CronTrigger.from_crontab(" ".join(crontab))}
        elif task["frequency"]:
            trigger = {
                "trigger": "interval",
                "start_date": self.aps_date(task["start_date"]),
                "end_date": self.aps_date(task["end_date"]),
                "seconds": int(task["frequency"]) * self.seconds[task["frequency_unit"]],
            }
        else:
            trigger = {
                "trigger": "date",
                "run_date": self.aps_date(task["start_date"])
            }
        if not self.scheduler.get_job(task["id"]):
            job = self.scheduler.add_job(
                id=str(task["id"]),
                replace_existing=True,
                func=self.run_service,
                args=[task["id"]],
                **trigger,
            )
        else:
            job = self.scheduler.reschedule_job(str(task["id"]), **trigger)
        return job.next_run_time > datetime.now(job.next_run_time.tzinfo)
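
A sketch of a task payload that schedule_task accepts, assuming the ../setup/scheduler.json config is in place; the id and dates below are illustrative. Note that aps_date() expects dd/mm/yyyy input:

app = Scheduler()
task = {
    "id": "42",
    "scheduling_mode": "standard",          # anything other than "cron"
    "frequency": "5",
    "frequency_unit": "minutes",            # key into Scheduler.seconds
    "start_date": "01/01/2030 08:00:00",    # parsed by aps_date()
    "end_date": "01/02/2030 08:00:00",
}
app.schedule_task(task)                     # True if the first run is in the future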
示例#20
class MainRunner(object):
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    class NoRunningFilter(logging.Filter):
        def filter(self, record):
            return not record.msg.startswith('Execution')

    def __init__(self, dirpath='.'):
        '''
        local path for load config
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        event_handler = JsonHandler(patterns=["*.json"],
                                    ignore_directories=True)
        event_handler.set_handler(oncreated=self.load,
                                  onmodified=self.load,
                                  ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

        my_filter = self.NoRunningFilter()
        logging.getLogger("apscheduler.scheduler").addFilter(my_filter)

    def load_dir(self, folder):
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if fname.endswith('.json'):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open("agent.log", 'w+') as f:
            f.write(fp + " was loaded!")
        data = {}
        loaded = False
        for _ in range(10):
            try:
                with open(fp) as in_data:
                    data = json.load(in_data)
                    # default will load JOB_NAME parameter in Jenkins created json file
                    data['name'] = data.get('JOB_NAME', "Jenkins Job")
                    data['path'] = fp
                    loaded = True
                    break
            except ValueError as e:
                logger.warning(fp + " failed to load: " + str(e))
                return None
            except Exception:
                logger.warning(
                    "File is not ready. Wait 1 second for another try.")
                time.sleep(1)

        if not loaded:
            logger.warning(fp + " is not ready for 10 seconds.")
            return None

        # load interval value from Jenkins created json file (default : 30 )
        interval = int(data.get('interval', 30))

        # load outputpath and defaultoutputpath from Jenkins created json file
        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(
                    self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # //memo: Interval can't be modified
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name)

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = data.get('AGENT_MODULE_PATH', "hasalTask")
            object_name = data.get('AGENT_OBJECT_NAME', "HasalTask")
            try:
                runner_module = getattr(importlib.import_module(module_path),
                                        object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run,
                                   'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval)
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [sequence number] [minion name] [config_path] [status]
        '''
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        self.scheduler.pause_job(fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
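
A sketch of the per-job JSON config that load() consumes; the field names come from the loader above, the values are illustrative:

import json

cfg = {
    "JOB_NAME": "demo-job",
    "interval": 60,                         # seconds between runs
    "AGENT_MODULE_PATH": "hasalTask",
    "AGENT_OBJECT_NAME": "HasalTask",
    "output": {"defaultOutputPath": "output", "dirpath": "demo-job"},
}
with open("configs/demo-job.json", "w") as f:
    json.dump(cfg, f)

runner = MainRunner(dirpath="configs")      # watches configs/ for *.json changes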
示例#21
class SchedulerService(Service):
    def __init__(self, env, db, collection, worker_num):
        super(SchedulerService, self).__init__(env)
        self._db_name = db
        self._collection_name = collection
        self._worker_num = worker_num
        self._sched = None
        global _cur_sched
        _cur_sched = self

    def on_active(self):
        super(SchedulerService, self).on_active()
        self._init_scheduler()

    def on_inactive(self):
        if self._sched is not None:
            self._sched.shutdown()

    def _init_scheduler(self):
        jobstores = {
            'default': MongoDBJobStore(self._db_name, self._collection_name,
                                       host=self.mongodb_service.get_connection_info()),
        }
        executors = {
            'default': ThreadPoolExecutor(self._worker_num),
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': 3
        }
        self._sched = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
        self._sched.start()

    def add_job(self, handler, dt, args=None, kwargs=None, job_id=None, replace_existing=True):
        '''
        Add a one-off job; it is discarded after it runs.
        :param handler: callback function; must be thread-safe
                        def handler(env, *args, **kwargs): pass
        :param dt: datetime
        :param job_id: string, custom id
        :param kwargs: arguments passed to the handler
        :return: job id
        '''
        args = args or []
        args.insert(0, handler)
        job = self._sched.add_job(dispatch_expire_job, 'date', run_date=dt, id=job_id, args=args, kwargs=kwargs,
                                  replace_existing=replace_existing)
        return job.id

    def add_interval_job(self, handler,
                         weeks=0, days=0, hours=0, minutes=0, seconds=0, args=None, kwargs=None, job_id=None,
                         replace_existing=True):
        '''
        Add a recurring job.
        :param handler: callback function; must be thread-safe.
                        def handler(env, *args, **kwargs): pass
        :param weeks:
        :param days:
        :param hours:
        :param minutes:
        :param seconds:
        :param job_id: string, custom id
        :param kwargs: arguments passed to the handler
        :return: job id
        '''
        args = args or []
        args.insert(0, handler)
        job = self._sched.add_job(dispatch_expire_job, 'interval', weeks=weeks, days=days, hours=hours,
                                  minutes=minutes, seconds=seconds, id=job_id, args=args, kwargs=kwargs,
                                  replace_existing=replace_existing)
        return job.id

    def remove_job(self, job_id):
        try:
            self._sched.remove_job(job_id)
        except JobLookupError:
            pass

    def pause_job(self, job_id):
        self._sched.pause_job(job_id)

    def resume_job(self, job_id):
        self._sched.resume_job(job_id)
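
A usage sketch, assuming the surrounding service framework supplies env and that dispatch_expire_job invokes the handler with it:

def handler(env, payload):
    print("fired:", payload)

svc = SchedulerService(env, db='billing', collection='jobs', worker_num=4)
svc.on_active()                             # builds and starts the scheduler
one_off = svc.add_job(handler, datetime.now() + timedelta(minutes=5), args=['hello'])
svc.add_interval_job(handler, minutes=10, args=['tick'], job_id='tick')
svc.remove_job(one_off)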
示例#22
class Scheduler:

    def __init__(self):
        self.__running_tasks = []

        self.aps_scheduler = BackgroundScheduler()

        # task listener
        def task_listener_add(event):
            if event.job_id not in self.__running_tasks:
                self.__running_tasks.append(event.job_id)
                event_stream(type='task')

        def task_listener_remove(event):
            if event.job_id in self.__running_tasks:
                self.__running_tasks.remove(event.job_id)
                event_stream(type='task')

        self.aps_scheduler.add_listener(task_listener_add, EVENT_JOB_SUBMITTED)
        self.aps_scheduler.add_listener(task_listener_remove, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        # configure all tasks
        self.__cache_cleanup_task()
        self.__check_health_task()
        self.update_configurable_tasks()

        self.aps_scheduler.start()

    def update_configurable_tasks(self):
        self.__sonarr_update_task()
        self.__radarr_update_task()
        self.__sonarr_full_update_task()
        self.__radarr_full_update_task()
        self.__update_bazarr_task()
        self.__search_wanted_subtitles_task()
        self.__upgrade_subtitles_task()
        self.__randomize_interval_task()
        self.__automatic_backup()
        if args.no_tasks:
            self.__no_task()

    def add_job(self, job, name=None, max_instances=1, coalesce=True, args=None, kwargs=None):
        self.aps_scheduler.add_job(
            job, DateTrigger(run_date=datetime.now()), name=name, id=name, max_instances=max_instances,
            coalesce=coalesce, args=args, kwargs=kwargs)

    def execute_job_now(self, taskid):
        self.aps_scheduler.modify_job(taskid, next_run_time=datetime.now())

    def get_running_tasks(self):
        return self.__running_tasks

    def get_task_list(self):
        def get_time_from_interval(td_object):
            seconds = int(td_object.total_seconds())
            periods = [
                ('year', 60 * 60 * 24 * 365),
                ('month', 60 * 60 * 24 * 30),
                ('day', 60 * 60 * 24),
                ('hour', 60 * 60),
                ('minute', 60),
                ('second', 1)
            ]

            strings = []
            for period_name, period_seconds in periods:
                if seconds >= period_seconds:
                    period_value, seconds = divmod(seconds, period_seconds)
                    has_s = 's' if period_value > 1 else ''
                    strings.append("%s %s%s" % (period_value, period_name, has_s))

            return ", ".join(strings)

        def get_time_from_cron(cron):
            year = str(cron[0])
            if year == "2100":
                return "Never"

            day = str(cron[4])
            hour = str(cron[5])

            if day == "*":
                text = "everyday"
            else:
                text = "every " + day_name[int(day)]

            if hour != "*":
                text += " at " + hour + ":00"

            return text

        task_list = []
        for job in self.aps_scheduler.get_jobs():
            next_run = 'Never'
            if job.next_run_time:
                next_run = pretty.date(job.next_run_time.replace(tzinfo=None))
            if isinstance(job.trigger, CronTrigger):
                if job.next_run_time and str(job.trigger.__getstate__()['fields'][0]) != "2100":
                    next_run = pretty.date(job.next_run_time.replace(tzinfo=None))

            if job.id in self.__running_tasks:
                running = True
            else:
                running = False

            if isinstance(job.trigger, IntervalTrigger):
                interval = "every " + get_time_from_interval(job.trigger.__getstate__()['interval'])
                task_list.append({'name': job.name, 'interval': interval, 'next_run_in': next_run,
                                  'next_run_time': next_run, 'job_id': job.id, 'job_running': running})
            elif isinstance(job.trigger, CronTrigger):
                task_list.append({'name': job.name, 'interval': get_time_from_cron(job.trigger.fields),
                                  'next_run_in': next_run, 'next_run_time': next_run, 'job_id': job.id,
                                  'job_running': running})

        return task_list

    def __sonarr_update_task(self):
        if settings.general.getboolean('use_sonarr'):
            self.aps_scheduler.add_job(
                update_series, IntervalTrigger(minutes=int(settings.sonarr.series_sync)), max_instances=1,
                coalesce=True, misfire_grace_time=15, id='update_series', name='Update Series list from Sonarr',
                replace_existing=True)
            self.aps_scheduler.add_job(
                sync_episodes, IntervalTrigger(minutes=int(settings.sonarr.episodes_sync)), max_instances=1,
                coalesce=True, misfire_grace_time=15, id='sync_episodes', name='Sync episodes with Sonarr',
                replace_existing=True)

    def __radarr_update_task(self):
        if settings.general.getboolean('use_radarr'):
            self.aps_scheduler.add_job(
                update_movies, IntervalTrigger(minutes=int(settings.radarr.movies_sync)), max_instances=1,
                coalesce=True, misfire_grace_time=15, id='update_movies', name='Update Movie list from Radarr',
                replace_existing=True)

    def __cache_cleanup_task(self):
        self.aps_scheduler.add_job(cache_maintenance, IntervalTrigger(hours=24), max_instances=1, coalesce=True,
                                   misfire_grace_time=15, id='cache_cleanup', name='Cache maintenance')

    def __check_health_task(self):
        self.aps_scheduler.add_job(check_health, IntervalTrigger(hours=6), max_instances=1, coalesce=True,
                                   misfire_grace_time=15, id='check_health', name='Check health')

    def __automatic_backup(self):
        backup = settings.backup.frequency
        if backup == "Daily":
            self.aps_scheduler.add_job(
                backup_to_zip, CronTrigger(hour=settings.backup.hour), max_instances=1, coalesce=True,
                misfire_grace_time=15, id='backup', name='Backup database and configuration file',
                replace_existing=True)
        elif backup == "Weekly":
            self.aps_scheduler.add_job(
                backup_to_zip, CronTrigger(day_of_week=settings.backup.day, hour=settings.backup.hour),
                max_instances=1, coalesce=True, misfire_grace_time=15, id='backup',
                name='Backup database and configuration file', replace_existing=True)
        elif backup == "Manually":
            try:
                self.aps_scheduler.remove_job(job_id='backup')
            except JobLookupError:
                pass

    def __sonarr_full_update_task(self):
        if settings.general.getboolean('use_sonarr'):
            full_update = settings.sonarr.full_update
            if full_update == "Daily":
                self.aps_scheduler.add_job(
                    update_all_episodes, CronTrigger(hour=settings.sonarr.full_update_hour), max_instances=1,
                    coalesce=True, misfire_grace_time=15, id='update_all_episodes',
                    name='Update all Episode Subtitles from disk', replace_existing=True)
            elif full_update == "Weekly":
                self.aps_scheduler.add_job(
                    update_all_episodes,
                    CronTrigger(day_of_week=settings.sonarr.full_update_day, hour=settings.sonarr.full_update_hour),
                    max_instances=1, coalesce=True, misfire_grace_time=15, id='update_all_episodes',
                    name='Update all Episode Subtitles from disk', replace_existing=True)
            elif full_update == "Manually":
                self.aps_scheduler.add_job(
                    update_all_episodes, CronTrigger(year='2100'), max_instances=1, coalesce=True,
                    misfire_grace_time=15, id='update_all_episodes',
                    name='Update all Episode Subtitles from disk', replace_existing=True)

    def __radarr_full_update_task(self):
        if settings.general.getboolean('use_radarr'):
            full_update = settings.radarr.full_update
            if full_update == "Daily":
                self.aps_scheduler.add_job(
                    update_all_movies, CronTrigger(hour=settings.radarr.full_update_hour), max_instances=1,
                    coalesce=True, misfire_grace_time=15,
                    id='update_all_movies', name='Update all Movie Subtitles from disk', replace_existing=True)
            elif full_update == "Weekly":
                self.aps_scheduler.add_job(
                    update_all_movies,
                    CronTrigger(day_of_week=settings.radarr.full_update_day, hour=settings.radarr.full_update_hour),
                    max_instances=1, coalesce=True, misfire_grace_time=15, id='update_all_movies',
                    name='Update all Movie Subtitles from disk', replace_existing=True)
            elif full_update == "Manually":
                self.aps_scheduler.add_job(
                    update_all_movies, CronTrigger(year='2100'), max_instances=1, coalesce=True, misfire_grace_time=15,
                    id='update_all_movies', name='Update all Movie Subtitles from disk', replace_existing=True)

    def __update_bazarr_task(self):
        if not args.no_update and os.environ["BAZARR_VERSION"] != '':
            task_name = 'Update Bazarr'

            if settings.general.getboolean('auto_update'):
                self.aps_scheduler.add_job(
                    check_if_new_update, IntervalTrigger(hours=6), max_instances=1, coalesce=True,
                    misfire_grace_time=15, id='update_bazarr', name=task_name, replace_existing=True)
            else:
                self.aps_scheduler.add_job(
                    check_if_new_update, CronTrigger(year='2100', hour=4), id='update_bazarr', name=task_name,
                    replace_existing=True)
                self.aps_scheduler.add_job(
                    check_releases, IntervalTrigger(hours=3), max_instances=1, coalesce=True, misfire_grace_time=15,
                    id='update_release', name='Update Release Info', replace_existing=True)

        else:
            self.aps_scheduler.add_job(
                check_releases, IntervalTrigger(hours=3), max_instances=1, coalesce=True, misfire_grace_time=15,
                id='update_release', name='Update Release Info', replace_existing=True)

    def __search_wanted_subtitles_task(self):
        if settings.general.getboolean('use_sonarr'):
            self.aps_scheduler.add_job(
                wanted_search_missing_subtitles_series, IntervalTrigger(hours=int(settings.general.wanted_search_frequency)),
                max_instances=1, coalesce=True, misfire_grace_time=15, id='wanted_search_missing_subtitles_series',
                name='Search for wanted Series Subtitles', replace_existing=True)
        if settings.general.getboolean('use_radarr'):
            self.aps_scheduler.add_job(
                wanted_search_missing_subtitles_movies, IntervalTrigger(hours=int(settings.general.wanted_search_frequency_movie)),
                max_instances=1, coalesce=True, misfire_grace_time=15, id='wanted_search_missing_subtitles_movies',
                name='Search for wanted Movies Subtitles', replace_existing=True)

    def __upgrade_subtitles_task(self):
        if settings.general.getboolean('upgrade_subs') and \
                (settings.general.getboolean('use_sonarr') or settings.general.getboolean('use_radarr')):
            self.aps_scheduler.add_job(
                upgrade_subtitles, IntervalTrigger(hours=int(settings.general.upgrade_frequency)), max_instances=1,
                coalesce=True, misfire_grace_time=15, id='upgrade_subtitles',
                name='Upgrade previously downloaded Subtitles', replace_existing=True)

    def __randomize_interval_task(self):
        for job in self.aps_scheduler.get_jobs():
            if isinstance(job.trigger, IntervalTrigger):
                interval_seconds = int(job.trigger.interval.total_seconds())
                self.aps_scheduler.modify_job(job.id, next_run_time=datetime.now() + timedelta(
                    seconds=randrange(int(interval_seconds * 0.75), interval_seconds)))

    def __no_task(self):
        for job in self.aps_scheduler.get_jobs():
            self.aps_scheduler.modify_job(job.id, next_run_time=None)
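
For reference, a short sketch of driving this class from the rest of the application (it assumes the surrounding settings module and task functions are importable); the task ids come from the add_job calls above:

scheduler = Scheduler()
scheduler.execute_job_now('cache_cleanup')  # pull the job's next_run_time to now
for task in scheduler.get_task_list():
    print(task['name'], '->', task['next_run_in'])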
示例#23
class Scheduler(object):
  def __init__(self):
    self._scheduler = BackgroundScheduler()
    self._scheduler.start()

  def runCron(self, function, minute=None, hour=None, day=None, month=None, day_week=None, year=None, job_id=None, args=[], kwargs={}):
    if job_id is None:
      job_id=str(function)
    logging.info('adding cron job: {}'.format(str(job_id)))
    self._scheduler.add_job(function, 'cron', args=args, kwargs=kwargs, minute=minute, hour=hour, day=day, month=month, day_of_week=day_week, year=year, id=job_id, replace_existing=True)

  #TODO: Build cron class
  def runSimpleWeekCron(self, function, minute=None, hour=None, days_of_week=None, job_id=None, args=[], kwargs={}):
    '''days_of_week takes a list of days'''
    if job_id is None:
      job_id=str(function)
    if days_of_week is not None and days_of_week != '*':
      run_days = []
      for d in DAYS_OF_WEEK:
        if d in days_of_week:
          run_days.append(d)
      days_of_week = ','.join(run_days)
    logging.info('adding simple weekly cron job: {}'.format(str(job_id)))
    self._scheduler.add_job(function, 'cron', args=args, kwargs=kwargs, minute=minute, hour=hour, day='*', month='*', day_of_week=days_of_week, year='*', id=job_id, replace_existing=True)


  def runEveryS(self,delay, function, args=[], kwargs={}, job_id=None, replace=True):
    if job_id is None:
      job_id=str(function)
    logging.info('runEveryS job: {}'.format(str(job_id)))
    self._scheduler.add_job(function, 'interval', seconds=delay, args=args, kwargs=kwargs, id=job_id, replace_existing=replace)

  def runEveryM(self,delay, function, args=[], kwargs={}, job_id=None, replace=True):
    if job_id is None:
      job_id=str(function)
    logging.info('runEveryM job: {}'.format(str(job_id)))
    self._scheduler.add_job(function, 'interval', minutes=delay, args=args, kwargs=kwargs, id=job_id, replace_existing=replace)

  def runEveryH(self,delay, function, args=[], kwargs={}, job_id=None, replace=True):
    if job_id is None:
      job_id=str(function)
    logging.info('runEveryH job: {}'.format(str(job_id)))
    self._scheduler.add_job(function, 'interval', hours=delay, args=args, kwargs=kwargs, id=job_id, replace_existing=replace)

  def runInS(self,delay, function, args=[], kwargs={}, job_id=None, replace=True):
    if job_id is None:
      job_id=str(function)
    logging.info('runInS job: {}'.format(str(job_id)))
    run_time = datetime.now() + timedelta(seconds=delay)
    self._scheduler.add_job(function, 'date', run_date=run_time, args=args, kwargs=kwargs, id=job_id, replace_existing=replace)

  def runInM(self,delay, function, args=[], kwargs={}, job_id=None, replace=True):
    if job_id is None:
      job_id=str(function)
    logging.info('runInM job: {}'.format(str(job_id)))
    run_time = datetime.now() + timedelta(minutes=delay)
    self._scheduler.add_job(function, 'date', run_date=run_time, args=args, kwargs=kwargs, id=job_id, replace_existing=replace)

  def runInH(self,delay, function, args=[], kwargs={}, job_id=None, replace=True):
    if job_id is None:
      job_id=str(function)
    logging.info('runInH job: {}'.format(str(job_id)))
    run_time = datetime.now() + timedelta(hours=delay)
    self._scheduler.add_job(function, 'date', run_date=run_time, args=args, kwargs=kwargs, id=job_id, replace_existing=replace)

  def runAt(self, date, function, args=[], kwargs={}, job_id=None, replace=True):
    if job_id is None:
      job_id=str(function)
    logging.info('runAt job: {} date: {}'.format(str(job_id),str(date)))
    self._scheduler.add_job(function, 'date', run_date=date, args=args, kwargs=kwargs, id=job_id, replace_existing=replace)

  def cancel(self, job_id):
    try:
      logging.info('canceling job: {}'.format(str(job_id)))
      self._scheduler.remove_job(job_id)
      return True
    except Exception:
      return False
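
A usage sketch for the convenience wrappers; poll is illustrative, and DAYS_OF_WEEK is assumed to hold cron day names such as 'mon':

def poll():
    print("polling")

sched = Scheduler()
sched.runEveryM(15, poll, job_id='poll')    # interval trigger: every 15 minutes
sched.runInS(30, poll, job_id='poll-once')  # date trigger: 30 seconds from now
sched.runSimpleWeekCron(poll, minute=0, hour=9,
                        days_of_week=['mon', 'fri'], job_id='poll-weekly')
sched.cancel('poll-once')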
示例#24

def myfunc():
    print("myfunc O(∩_∩)O")
    import random
    if random.random() > 0.6:
        raise RuntimeError("hey raise")  # raising a bare string is a TypeError in Python 3


def my_listener(event):
    if event.exception:
        log_job = {
            "code": event.code,
            "jobid": event.job_id,
            "jobstore": event.jobstore
        }
        print(f'The job {event.job_id} crashed :( | {log_job}')
    else:
        print(f'The job {event.job_id} worked :)')


from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR

scheduler = BackgroundScheduler()
scheduler.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

scheduler.start()
scheduler.add_job(myfunc, 'interval', seconds=1, id='test_job_id')  # minutes=1
print(f"scheduler.get_jobs: {scheduler.get_jobs()}")

import time
time.sleep(6)
scheduler.remove_job('test_job_id')
scheduler.shutdown(wait=True)
示例#25
class ScheduleCommand(Command):
    schedule_sender = None
    def __init__(self, logger, message_sender):
        super().__init__(logger, message_sender)
        # Message scheduler configuration
        self.scheduler = BackgroundScheduler()
        configuration = self.message_sender.get_configuration()
        sdb_url = configuration.get('Message Scheduler', 'db_path')
        self.scheduler.add_jobstore('sqlalchemy', url=sdb_url)
        self.scheduler.start()
        ScheduleCommand.schedule_sender = message_sender

    def process(self, chat_id, user_id, username, arguments):
        if len(arguments) < 1:
            return self.help()
        
        operation = arguments[0] 
        command_arguments = arguments[1:]

        if operation == "add":
            return self.add_message(username, chat_id, command_arguments)
        elif operation == "time":
            return self.get_local_time()
        elif operation == "remove":
            return self.remove_message(username, command_arguments)
        else:
            return self.help()

    def get_local_time(self):
        date_str = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
        return "My time is {}".format(date_str)

    def add_message(self, username, chat_id, arguments):
        if len(arguments) < 2:
            return self.help()
        date = None
        message = None
        if arguments[0] == 'relative':
            if len(arguments) < 3:
                return self.help()
            date = self.on_delta_parse(arguments[1])
            message = arguments[2]
        else:
            date = self.on_date_parse(arguments[0])
            message = arguments[1]
        
        if message.startswith('"') and message.endswith('"'):
            message = message[1:-1]

        reference = self.user_reference(username)
        if date is None:
            return "{}Date format not recognized".format(reference)

        current_date = datetime.today()
        if date < current_date:
            current_str = self.get_human_string_date(current_date)
            return "{}Sorry, I can't travel to the past, my current date is: {}".format(reference, current_str)
 
        message_id = str(random.randrange(0, 999999999))
        self.scheduler.add_job(ScheduleCommand.send_programmed_message,
                               'date', run_date=date,
                               args=[username, chat_id, message], id=message_id)
        
        date_str = self.get_human_string_date(date)
        return "{}The message [{}] has been successfully scheduled on {}".format(
                reference, message_id, date_str)

    def send_programmed_message(username, chat_id, message):
        reference = ScheduleCommand.user_reference(username)
        text = "{}{}".format(reference, message)
        ScheduleCommand.schedule_sender.send_message(chat_id, text)    
    send_programmed_message = staticmethod(send_programmed_message)

    def remove_message(self, username, arguments): 
        if len(arguments) < 1:
            return self.help()
        
        founds = 0
        ids = self.get_comma_arguments(arguments[0])
        
        for message_id in ids:
            job = self.scheduler.get_job(message_id)
            if job is None:
                continue
            if job.args[0] != username:
                continue
            self.scheduler.remove_job(message_id)
            founds += 1
        
        reference = self.user_reference(username)
        if founds == len(ids):
            if len(ids) == 1:
                return "{}The scheduled message was canceled.".format(reference)
            else:
                return "{}The scheduled messages were canceled.".format(reference)
        elif founds > 0:
            return "{}Some scheduled messages were canceled, but some others were not found or you aren't the owner.".format(reference)
        else:
            if len(ids) == 1:
                return "{}The scheduled message was not found or you aren't the owner.".format(reference)
            else:
                return "{}The scheduled messages were not found or you aren't the owner.".format(reference)

    def get_human_string_date(self,datetime):
        return str(datetime.strftime('%Y-%m-%d %H:%M:%S'))

    def on_delta_parse(self, text_date):
        regex = re.compile(r'^((?P<days>\d+?)d)?((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?$')
        parts = regex.match(text_date)
        if not parts:
            return None
        parts = parts.groupdict()
        time_params = {}
        valid_params = 0
        for (name, param) in parts.items():
            if param:
                time_params[name] = int(param)
                valid_params = valid_params + 1
        if valid_params == 0:
            return None
        return datetime.now() + timedelta(**time_params)

    def on_date_parse(self, text_date):
        try:
            return parser.parse(text_date, dayfirst = True, yearfirst = True)
        except ValueError:
            return None

    def user_reference(username):
        if username is None:
            return ""
        else:
            return "@{}: ".format(username)
    user_reference = staticmethod(user_reference)

    def help(self):
        self.logger.info("Printing help for schedule message command.")
        return self.get_file_help(__file__, "schedule_message.man")

    def name(self):
        return "schedule_message"

    def description(self):
        return "Schedule a message to be sent on a specified date."
示例#26
File: flask1.py  Project: ziphead/Exoviz


@app.before_request
def before_request():
    db.connect()
    logging.info('connection opened')


@app.after_request
def after_request(response):
    db.close()
    logging.info('connection closed')
    return response


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        filename='error.log',
                        level=logging.INFO)
    app.secret_key = 'super_secret_key'
    app.debug = True
    scheduler = BackgroundScheduler()
    scheduler.add_job(Control().update, 'interval', hours=24, id='updates')
    try:
        scheduler.start()
        app.run(host='0.0.0.0', port=8080)
    except (KeyboardInterrupt, SystemExit):
        scheduler.remove_job('updates')
        scheduler.shutdown()
示例#27
class SchedulerUtil(object):
    # class attribute: tracks whether an instance already exists
    instance = None

    # flag: records whether __init__ has run
    init_flag = False

    # object creation: space is allocated for the object first
    def __new__(cls, *args, **kwargs):
        # check whether the class attribute already holds an instance
        if cls.instance is None:
            # call the parent method to allocate space for the object
            logger.info("SchedulerUtil--> allocating instance")
            cls.instance = super().__new__(cls)
        # return the reference to the single instance
        return cls.instance

    # after allocation, initialize the object attributes
    def __init__(self):
        # skip if initialization has already been done
        if SchedulerUtil.init_flag:
            return
        logger.info("SchedulerUtil--> initializing attributes")
        self.scheduler = BackgroundScheduler()
        SchedulerUtil.init_flag = True

    def __del__(self):
        logger.info("del done!")

    @classmethod
    def flush(cls):
        """强制回收对象"""
        cls.instance = None
        SchedulerUtil.init_flag = False

    def add_job(self, jobid, **kwargs):
        logger.info("adding job ----> %s" % jobid)
        # look the task method up by name instead of using eval()
        self.scheduler.add_job(getattr(self, jobid),
                               'cron',
                               **kwargs,
                               id=jobid)

    def remove_job(self, jobid):
        logger.info("removing job ----> %s" % jobid)
        self.scheduler.remove_job(jobid)

    def start_scheduler(self):
        """Start the scheduler

        :return:
        """
        logger.info("starting the scheduler")
        self.scheduler.start()
        logger.info("scheduler state: %s" % self.scheduler.state)

    def shutdown_scheduler(self):
        """Stop the scheduler

        :return:
        """
        logger.info("stopping the scheduler")
        self.scheduler.shutdown(wait=False)

    def get_scheduler_status(self):
        """Get the scheduler state

        :return:
        """
        logger.info("scheduler state: %s" % self.scheduler.state)
        return self.scheduler.state

    def get_scheduler_job(self, jobid):
        """获取调度中的任务

        :return:
        """
        return self.scheduler.get_job(jobid)

    def get_scheduler_jobs(self):
        """获取调度中的任务列表

        :return:
        """
        return self.scheduler.get_jobs()

    def exam_job(self):
        """Exam-information task

        :return:
        """
        logger.info("exam_job is running...")
        exam_server.ExamServer().run()

    def employ_job(self):
        """Recruitment-information task

        :return:
        """
        logger.info("employ_job is running...")
        zhaopin_server.ZhaopinServer().run()

    def anjuke_job(self):
        """Anjuke data task

        :return:
        """
        logger.info("anjuke_job is running...")
        anjuke_server.AnJuKeServer().run()

    def mafengwo_job(self):
        """Mafengwo data task (test task)

        :return:
        """
        logger.info("mafengwo_job is running...")
        mafengwo_server.MaFengWoServer("西安").run()
示例#28
class BileanScheduler(object):
    """Billing scheduler based on apscheduler"""

    job_types = (
        NOTIFY, DAILY, FREEZE,
    ) = (
        'notify', 'daily', 'freeze',
    )
    trigger_types = (DATE, CRON) = ('date', 'cron')

    def __init__(self, **kwargs):
        super(BileanScheduler, self).__init__()
        self._scheduler = BackgroundScheduler()
        self.notifier = notifier.Notifier()
        self.engine_id = kwargs.get('engine_id', None)
        self.context = kwargs.get('context', None)
        if not self.context:
            self.context = bilean_context.get_admin_context()
        if cfg.CONF.bilean_task.store_ap_job:
            self._scheduler.add_jobstore(cfg.CONF.bilean_task.backend,
                                         url=cfg.CONF.bilean_task.connection)

    def init_scheduler(self):
        """Init all jobs related to the engine from db."""
        jobs = db_api.job_get_all(self.context, engine_id=self.engine_id)
        if not jobs:
            LOG.info(_LI("No job found from db"))
            return True
        for job in jobs:
            if self.is_exist(job.id):
                continue
            task_name = "_%s_task" % (job.job_type)
            task = getattr(self, task_name)
            # daily jobs use a cron trigger, notify/freeze jobs a date trigger
            trigger_type = self.CRON if job.job_type == self.DAILY else self.DATE
            self.add_job(task, job.id, trigger_type=trigger_type,
                         params=job.parameters)

    def add_job(self, task, job_id, trigger_type='date', params=None, **kwargs):
        """Add a job to scheduler by given data.

        :param str|unicode job_id: job id, in the form '<job_type>-<user_id>'
        :param str|unicode trigger_type: 'date' or 'cron'
        :param dict params: trigger parameters (run_date, or hour/minute)

        """
        params = params or kwargs
        mg_time = cfg.CONF.bilean_task.misfire_grace_time
        job_time_zone = cfg.CONF.bilean_task.time_zone
        user_id = job_id.split('-')[1]
        if trigger_type == 'date':
            run_date = params.get('run_date')
            if run_date is None:
                msg = "Param run_date cannot be None for trigger type 'date'."
                raise exception.InvalidInput(reason=msg)
            self._scheduler.add_job(task, 'date',
                                    timezone=job_time_zone,
                                    run_date=run_date,
                                    args=[user_id],
                                    id=job_id,
                                    misfire_grace_time=mg_time)
            return True

        # Add a cron type job
        hour = params.get('hour')
        minute = params.get('minute')
        if not hour or not minute:
            hour, minute = self._generate_timer()
        self._scheduler.add_job(task, 'cron',
                                timezone=job_time_zone,
                                hour=hour,
                                minute=minute,
                                args=[user_id],
                                id=job_id,
                                misfire_grace_time=mg_time)
        return True

    def modify_job(self, job_id, **changes):
        """Modifies the properties of a single job.

        Modifications are passed to this method as extra keyword arguments.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.modify_job(job_id, **changes)

    def remove_job(self, job_id):
        """Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.remove_job(job_id)

    def start(self):
        LOG.info(_('Starting Billing scheduler'))
        self._scheduler.start()

    def stop(self):
        LOG.info(_('Stopping Billing scheduler'))
        self._scheduler.shutdown()

    def is_exist(self, job_id):
        """Returns if the Job exists that matches the given ``job_id``.

        :param str|unicode job_id: the identifier of the job
        :return: True|False
        """

        job = self._scheduler.get_job(job_id)
        return job is not None

    def _notify_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        msg = {'user': user.id, 'notification': 'The balance is almost used up'}
        self.notifier.info('billing.notify', msg)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'notify'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))
        self._add_freeze_job(user)

    def _daily_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'daily'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _freeze_task(self, user_id):
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'freeze'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _add_notify_job(self, user):
        if not user.rate:
            return False
        total_seconds = user['balance'] / user['rate']
        prior_notify_time = cfg.CONF.bilean_task.prior_notify_time * 3600
        notify_seconds = total_seconds - prior_notify_time
        notify_seconds = notify_seconds if notify_seconds > 0 else 0
        run_date = timeutils.utcnow() + timedelta(seconds=notify_seconds)
        job_params = {'run_date': run_date}
        job_id = self._generate_job_id(user['id'], self.NOTIFY)
        self.add_job(self._notify_task, job_id, params=job_params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.NOTIFY,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)

    def _add_freeze_job(self, user):
        if not user.rate:
            return False
        total_seconds = user.balance / user.rate
        run_date = timeutils.utcnow() + timedelta(seconds=total_seconds)
        job_params = {'run_date': run_date}
        job_id = self._generate_job_id(user.id, self.FREEZE)
        self.add_job(self._freeze_task, job_id, params=job_params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.FREEZE,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)
        return True

    def _add_daily_job(self, user):
        job_id = self._generate_job_id(user.id, self.DAILY)
        params = {'hour': random.randint(0, 23),
                  'minute': random.randint(0, 59)}
        self.add_job(self._daily_task, job_id, trigger_type='cron',
                     params=params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.DAILY,
               'engine_id': self.engine_id,
               'parameters': params}
        db_api.job_create(self.context, job)
        return True

    def _delete_all_job(self, user):
        for job_type in self.job_types:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def update_user_job(self, user):
        """Update user's billing job"""
        if user.status not in [user.ACTIVE, user.WARNING]:
            self._delete_all_job(user)
            return

        for job_type in self.NOTIFY, self.FREEZE:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

        daily_job_id = self._generate_job_id(user.id, self.DAILY)
        if not self.is_exist(daily_job_id):
            self._add_daily_job(user)

        if user.status == user.ACTIVE:
            self._add_notify_job(user)
        else:
            self._add_freeze_job(user)

    def _generate_timer(self):
        """Generate a random timer include hour and minute."""
        hour = random.randint(0, 23)
        minute = random.randint(0, 59)
        return hour, minute

    def _generate_job_id(self, user_id, job_type):
        """Generate job id by given user_id and job type"""
        return "%s-%s" % (job_type, user_id)
示例#29
class ScheduleManager(IScheduleManager):
    def __init__(self, config=None, event_listener=None):
        self.__config = config
        self.__event_listener = event_listener
        jobstores, executors, job_defaults, timezone = self.__get_apscheduler_settings(
        )

        # initial apscheduler
        self.__scheduler = BackgroundScheduler(jobstores=jobstores,
                                               executors=executors,
                                               job_defaults=job_defaults,
                                               timezone=timezone)

        if self.__event_listener:
            self.__scheduler.add_listener(self.__event_listener.event_start,
                                          EVENT_SCHEDULER_STARTED)
            self.__scheduler.add_listener(self.__event_listener.event_shutdown,
                                          EVENT_SCHEDULER_SHUTDOWN)
            self.__scheduler.add_listener(self.__event_listener.event_pause,
                                          EVENT_SCHEDULER_PAUSED)
            self.__scheduler.add_listener(self.__event_listener.event_resume,
                                          EVENT_SCHEDULER_RESUMED)
            self.__scheduler.add_listener(
                self.__event_listener.event_job_submit, EVENT_JOB_SUBMITTED)
            self.__scheduler.add_listener(
                self.__event_listener.event_job_max_instances,
                EVENT_JOB_MAX_INSTANCES)
            self.__scheduler.add_listener(
                self.__event_listener.event_job_execute, EVENT_JOB_EXECUTED)
            self.__scheduler.add_listener(
                self.__event_listener.event_job_error, EVENT_JOB_ERROR)
            self.__scheduler.add_listener(self.__event_listener.event_job_miss,
                                          EVENT_JOB_MISSED)

    def __get_apscheduler_settings(self):
        try:
            jobstore_url = "oracle+cx_oracle://{username}:{password}${host}:{port}/{dbname}".format(
                username=self.__config.db_user,
                password=self.__config.db_pwd,
                host=self.__config.db_host,
                port=self.__config.db_port,
                dbname=self.__config.db_name,
            )

            jobstores = {
                "default":
                SQLAlchemyJobStore(url=jobstore_url,
                                   tablename=self.__config.tablename)
            }

            executors = {
                "default": ThreadPoolExecutor(self.__config.max_workers),
                "processpool": ProcessPoolExecutor(2)
            }

            job_defaults = {
                "coalesce": True,
                "max_instances": 10,
                "misfire_grace_time": 30
            }

            timezone = self.__config.timezone

            return jobstores, executors, job_defaults, timezone

        except Exception as e:
            raise e

    def start(self, paused=False) -> bool:
        try:
            if self.__scheduler.state == STATE_RUNNING:
                return True

            self.__scheduler.start(paused=paused)
            return True

        except Exception as e:
            logging.error(f"scheduler start error...... {str(e)}")
            raise e

    def shutdown(self, wait=False) -> bool:
        try:
            self.__scheduler.shutdown(wait=wait)
            if self.__scheduler.state == STATE_STOPPED:
                return True

            return False

        except Exception as e:
            logging.error(f"scheduler shutdown error...... {str(e)}")
            raise e

    def pause(self) -> bool:
        try:
            self.__scheduler.pause()
            if self.__scheduler.state == STATE_PAUSED:
                return True

            return False

        except Exception as e:
            logging.error(f"scheduler pause error...... {str(e)}")
            raise e

    def resume(self) -> bool:
        try:
            self.__scheduler.resume()
            if self.__scheduler.state == STATE_RUNNING:
                return True

            return False

        except Exception as e:
            logging.error(f"scheduler resume error...... {str(e)}")
            raise e

    def get_state(self) -> str:
        try:
            if self.__scheduler.state == STATE_RUNNING:
                return "RUNNING"

            if self.__scheduler.state == STATE_PAUSED:
                return "PAUSED"

            return "STOPPED"

        except Exception as e:
            logging.error(f"scheduler get_state error...... {str(e)}")
            raise e

    def get_job(self, id="") -> object:
        try:
            job = self.__scheduler.get_job(id)
            return job

        except Exception as e:
            logging.error(f"scheduler get_job error...... {str(e)}")
            raise e

    def add_job(self, task_id="", trigger=None, task=None) -> bool:
        try:
            settings = {}
            settings["id"] = task_id if task_id else str(uuid4())
            settings["func"] = task.run
            settings["coalesce"] = True
            settings["replace_existing"] = True
            settings = {**settings, **(trigger.get_settings())}

            job = self.__scheduler.add_job(**settings)

            return True if self.get_job(job.id) else False

        except Exception as e:
            logging.error(f"scheduler add_job error...... {str(e)}")
            raise e

    def remove_job(self, id: str) -> bool:
        try:
            self.__scheduler.remove_job(id)
            return True if not self.get_job(id) else False

        except JobLookupError as e:
            return False

        except Exception as e:
            logging.error(f"scheduler remove_job error...... {str(e)}")
            raise e

    def pause_job(self, id: str) -> bool:
        try:
            job = self.__scheduler.pause_job(id)
            return True if job else False

        except Exception as e:
            logging.error(f"scheduler pause_job error...... {str(e)}")
            raise e

    def resume_job(self, id: str) -> bool:
        try:
            job = self.__scheduler.resume_job(id)
            return True if job else False

        except Exception as e:
            logging.error(f"scheduler resume_job error...... {str(e)}")
            raise e
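
A usage sketch; the trigger object only needs a get_settings() method returning apscheduler add_job keyword arguments, and the task needs a run() method. Both classes and my_config below are illustrative:

class EveryMinute:
    def get_settings(self):
        return {"trigger": "interval", "minutes": 1}

class PingTask:
    def run(self):
        print("ping")

manager = ScheduleManager(config=my_config)  # my_config supplies the Oracle jobstore settings
manager.start()
manager.add_job(task_id="ping", trigger=EveryMinute(), task=PingTask())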
示例#30
class Scheduler(object):
    def __init__(self, tasks, settings):
        self.settings = settings
        self.logger = get_logger('scheduler', self.settings)
        self.sched = None  # created in init(); guards the isinstance checks below

        self.intervals = self.settings['scheduler']['intervals']
        self.apscheduler = { 'apscheduler': self.settings['scheduler']['apscheduler'] }

        if not isinstance(tasks, dict):
            self.logger.error('tasks is not a dictionary')
            return

        if not isinstance(self.intervals, dict):
            self.logger.error('intervals is not a dictionary')
            return

        self.tasks = self._flatten_dict(tasks, '')

        self.logger.debug('Tasks found:')
        self.logger.debug(LINE_SPLITTER)
        for key in self.tasks:
            self.logger.debug('%45s %30s' % (key, self.tasks[key].task_type))
        self.logger.debug(LINE_SPLITTER)

        #self.logger.debug('Checking tasks paths!')
        # TODO: Check if paths are valid

    def init(self):
        """ Initializes the queue, and adds the tasks """

        self.logger.info('Initializing APScheduler...')

        apsched_kwargs = self._flatten_dict(self.apscheduler, '')
        apsched_kwargs['apscheduler.logger'] = get_logger('apscheduler', self.settings)

        self.sched = BackgroundScheduler(apsched_kwargs)

        for (id, task) in self.tasks.items():
            task_type = task.task_type

            self.logger.debug('Adding task "%s" [%s]' % (id, task_type))

            if task_type not in self.intervals:
                self.logger.info('Interval not defined for "%s" class. Assuming it is a one-time task' % task_type)
                self.add_task(id, task)
                continue

            self.add_task(id, task, self.intervals[task_type])

        self.logger.info('APScheduler initialized!')

    def clear(self):
        """ Removes all jobs from scheduler """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.error('Scheduler is not initialized')
            return

        for job in self.sched.get_jobs():
            job.remove()

    def start(self):
        """ Start the scheduler by starting the instance of APScheduler """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.error('Scheduler is not initialized')
            return

        try:
            self.sched.start()
        except SchedulerAlreadyRunningError as e:
            self.logger.warning(e)


    def stop(self, wait=True):
        """ Stop the scheduler. If wait=True, then it will be stopped after
            all jobs that are currently executed will finish """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.warning('Scheduler is not initialized')
            return

        try:
            self.sched.shutdown(wait=wait)
        except SchedulerNotRunningError as e:
            self.logger.warning(e)

    def add_task(self, id, func, interval=None):
        """ Adds a new task into the queue. If interval is None then the task
            will be executed once. """
        if not isinstance(id, str):
            self.logger.error('"id" argument is not an instance of basestring')
            return

        if not hasattr(func, '__call__'):
            self.logger.error('"func" is not callable')
            return

        try:
            if isinstance(interval, dict):
                self.sched.add_job(func, trigger='interval', id=id, **interval)
            elif interval is None:  # Run once (omit trigger)
                self.sched.add_job(func, id=id)
            else:
                self.logger.error('"interval" is not an instance of [time|None]')
                return
        except ConflictingIdError as e:
            self.logger.warning(e)

    def remove_task(self, id):
        """ Remove a job from the queue """
        if not isinstance(id, basestring):
            self.logger.error('"id" argument is not an instance of basestring')
            return

        try:
            self.sched.remove_job(id)
        except JobLookupError as e:
            self.logger.warning(e)

    def force_update(self, job_id=None):
        """ Updates a job with id == job_id, or all jobs if no id is given """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.warning('Scheduler is not initialized')
            return

        if not job_id:
            self.logger.info("Forcing update of all jobs")
            for job in self.sched.get_jobs():
                self.__run_job(job)
        else:
            self.logger.info("Forcing update of job %s" % job_id)
            job = self.sched.get_job(job_id)

            if not job:
                self.logger.warn("Job %s not found" % job_id)
            else:
                self.__run_job(job)

    def __run_job(self, job):
        if job.func:
            # Add the job to the scheduler and run it just once
            self.sched.add_job(job.func)

            # If we explicitly call job.func() then we block the thread and we get multiple
            # missed executions from apscheduler
            # job.func()
        else:
            self.logger.warning("Job %s has a None type callable func" % job.id)

    def _flatten_dict(self, d, path):
        new_dict = { }
        for key in d:
            if isinstance(d[key], dict):
                new_path = '%s.%s' % (path, key) if path else key

                x = self._flatten_dict(d[key],new_path).copy()
                new_dict.update(x)
            else:
                new_key = '%s.%s' % (path, key)
                new_dict[new_key] = d[key]

        return new_dict
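
A minimal sketch of what _flatten_dict produces for the scheduler settings above; the settings values are hypothetical, but the dotted keys match APScheduler's gconfig format.

# hypothetical nested settings, as read from a config file
settings = {'apscheduler': {'timezone': 'UTC',
                            'job_defaults': {'coalesce': True}}}
# _flatten_dict(settings, '') collapses nested dicts into dotted keys:
# {'apscheduler.timezone': 'UTC',
#  'apscheduler.job_defaults.coalesce': True}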
示例#31
0
class ExScheduler(object):
    def __init__(self):
        self.__scheduler = BackgroundScheduler()
        self.__jobstatuslist = {}

    def start_scheduler(self, **options):
        self.__scheduler.configure(**options)
        self.__scheduler.start()

    def shutdownscheduler(self):
        self.__scheduler.shutdown(wait=False)

    def getjob(self, jobid):
        return self.__scheduler.get_job(jobid)

    def scheduledjob(self, *args, **kw):
        return self.__scheduler.scheduled_job(*args, **kw)

    def addjob(self, *args, **kw):
        job = self.__scheduler.add_job(*args, **kw)
        self.__scheduler.wakeup()
        return job

    def removejob(self, job_id, jobstore=None):
        self.__scheduler.remove_job(job_id=job_id, jobstore=jobstore)

    def pausejob(self, job_id, jobstore=None):
        self.__scheduler.pause_job(job_id=job_id, jobstore=jobstore)
        self.setjobstatus(job_id, JobStatus.PAUSED)

    def resumejob(self, job_id, jobstore=None):
        self.__scheduler.resume_job(job_id=job_id, jobstore=jobstore)
        self.setjobstatus(job_id, JobStatus.SCHEDULING)

    def modifyjob(self, job_id, jobstore=None, **kw):
        job = self.__scheduler.modify_job(job_id=job_id,
                                          jobstore=jobstore,
                                          **kw)
        return job

    def getjoblist(self):
        joblist = self.__scheduler.get_jobs()
        return map(self._getjobinfo, joblist)

    def addlistener(self, callback, mask=EVENT_ALL):
        self.__scheduler.add_listener(callback, mask)

    def setjobstatus(self, jobid, jobstatus):
        self.__jobstatuslist[jobid] = jobstatus

    def jobstatusinitial(self, job_id, jobstore=None):
        job = self.__scheduler.get_job(job_id=job_id, jobstore=jobstore)
        if job_id not in self.__jobstatuslist:
            status = (JobStatus.SCHEDULING
                      if job.next_run_time else JobStatus.PAUSED) if hasattr(
                          job, 'next_run_time') else JobStatus.PENDING
            self.setjobstatus(job_id, status)

    def _getjobinfo(self, job):
        self.jobstatusinitial(job.id)
        status = self.__jobstatuslist[str(job.id)]
        return {
            "id": str(job.id),
            "name": str(job.name),
            "kwargs": job.kwargs,
            "trigger": trigger_str_to_dict(str(job.trigger)),
            "next_run_time": (datetime_repr(job.next_run_time)
                              if status in (JobStatus.SCHEDULING,
                                            JobStatus.RUNNING) else "--"),
            "status": status,
        }
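
A hedged usage sketch for the wrapper above; the tick function, interval and job id are illustrative only.

def tick():
    print('tick')

ex = ExScheduler()
ex.start_scheduler()                               # configure() then start()
ex.addjob(tick, 'interval', seconds=5, id='tick')  # wakeup() forces an immediate reschedule check
ex.pausejob('tick')                                # also records JobStatus.PAUSED
ex.resumejob('tick')                               # back to JobStatus.SCHEDULING
ex.shutdownscheduler()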
示例#32
0
class Scheduler(threading.Thread):
    def __init__(self, gc) -> None:
        super().__init__()
        self.name = "scheduler"
        self.exit = threading.Event()
        self.gc = gc
        self._sched = BackgroundScheduler(daemon=True)
        self._event_prefixes = tuple((f"{e.name.lower()}:" for e in EventType))
        add_event_listener(EventType.SHUTDOWN, self.shutdown)

    def run(self):
        self._sched.start()
        if ArgumentParser.args.scheduler_config:
            self.read_config(ArgumentParser.args.scheduler_config)
        self.exit.wait()

    def shutdown(self, event: Event):
        log.debug(f"Received request to shutdown scheduler {event.event_type}")
        if self._sched.running:
            self._sched.shutdown(wait=False)
        self.exit.set()

    def read_config(self, config_file: str) -> None:
        log.debug(f"Reading scheduler configuration file {config_file}")
        try:
            with open(config_file, "r") as fp:
                for line in fp:
                    line = line.strip()
                    if not line.startswith("#") and len(line) > 0:
                        self.add_job(line)
        except Exception:
            log.exception(
                f"Failed to read scheduler configuration file {config_file}")

    def scheduled_command(self, command):
        log.debug(f"Running scheduled command {command}")
        return cli_event_handler(command, graph=self.gc.graph)

    def add_job(self, args: str) -> Job:
        args = args.strip()
        cron = re.split(r"\s+", args, 5)
        if len(cron) != 6:
            raise ValueError(f"Invalid job {args}")
        minute, hour, day, month, day_of_week, command = cron
        if str(command).startswith(self._event_prefixes):
            event, cmd = command.split(":", 1)
            log.debug(
                (f"Scheduling to register command '{cmd}' for event {event} at"
                 f" minute={minute}, hour={hour}, day={day}, month={month},"
                 f" day_of_week={day_of_week}"))
            job = self._sched.add_job(
                register_cli_action,
                "cron",
                args=[command, True],
                minute=minute,
                hour=hour,
                day=day,
                month=month,
                day_of_week=day_of_week,
            )
        else:
            log.debug(
                (f"Scheduling command '{command}' at"
                 f" minute={minute}, hour={hour}, day={day}, month={month},"
                 f" day_of_week={day_of_week}"))
            job = self._sched.add_job(
                self.scheduled_command,
                "cron",
                args=[command],
                minute=minute,
                hour=hour,
                day=day,
                month=month,
                day_of_week=day_of_week,
            )
        return job

    def remove_job(self, job_id: str) -> bool:
        try:
            self._sched.remove_job(job_id)
        except JobLookupError:
            log.error(f"Couldn't find job with id {job_id}")
        else:
            return True
        return False

    def get_jobs(self) -> Iterable:
        for job in self._sched.get_jobs():
            if isinstance(job.trigger, CronTrigger):
                trigger_map = {}
                for field in job.trigger.fields:
                    trigger_map[field.name] = str(field)
                cron_line = (
                    f"{job.id}: {trigger_map.get('minute')} {trigger_map.get('hour')}"
                    f" {trigger_map.get('day')} {trigger_map.get('month')}"
                    f" {trigger_map.get('day_of_week')} {job.args[0]}")
                yield cron_line

    @staticmethod
    def add_args(arg_parser: ArgumentParser) -> None:
        arg_parser.add_argument(
            "--scheduler-config",
            help="Scheduler config in crontab format",
            default=None,
            dest="scheduler_config",
            type=str,
        )
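
A hypothetical scheduler-config file for the class above: five cron fields (minute hour day month day_of_week) followed by the command, split into at most six parts. Lines starting with '#' are skipped, and a known event prefix such as 'shutdown:' registers the command as a CLI action for that event on the given schedule. The command names are illustrative.

# run cleanup every night at 03:00
0 3 * * * cleanup
# register a command for the shutdown event, Mon-Fri at 22:30
30 22 * * 0-4 shutdown:save_state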
示例#33
0
class WorkersManager:
    class Command:
        def __init__(self, callback, timeout, args=(), options=dict()):
            self._callback = callback
            self._timeout = timeout
            self._args = args
            self._options = options
            self._source = "{}.{}".format(
                callback.__self__.__class__.__name__
                if hasattr(callback, "__self__")
                else callback.__module__,
                callback.__name__,
            )

        def execute(self):
            messages = []

            try:
                with timeout(
                    self._timeout,
                    exception=WorkerTimeoutError(
                        "Execution of command {} timed out after {} seconds".format(
                            self._source, self._timeout
                        )
                    ),
                ):
                    if inspect.isgeneratorfunction(self._callback):
                        for message in self._callback(*self._args):
                            messages += message
                    else:
                        messages = self._callback(*self._args)
            except WorkerTimeoutError as e:
                if messages:
                    logger.log_exception(
                        _LOGGER, "%s, sending only partial update", e, suppress=True
                    )
                else:
                    raise e

            _LOGGER.debug("Execution result of command %s: %s", self._source, messages)
            return messages

    def __init__(self, config):
        self._mqtt_callbacks = []
        self._config_commands = []
        self._update_commands = []
        self._scheduler = BackgroundScheduler(timezone=utc)
        self._daemons = []
        self._config = config
        self._command_timeout = config.get("command_timeout", DEFAULT_COMMAND_TIMEOUT)

    def register_workers(self, global_topic_prefix):
        for (worker_name, worker_config) in self._config["workers"].items():
            module_obj = importlib.import_module("workers.%s" % worker_name)
            klass = getattr(module_obj, "%sWorker" % worker_name.title())

            if module_obj.REQUIREMENTS is not None:
                self._pip_install_helper(module_obj.REQUIREMENTS)

            command_timeout = worker_config.get(
                "command_timeout", self._command_timeout
            )
            worker_obj = klass(
                command_timeout, global_topic_prefix, **worker_config["args"]
            )

            if "sensor_config" in self._config and hasattr(worker_obj, "config"):
                _LOGGER.debug(
                    "Added %s config with a %d-second timeout", repr(worker_obj), 2
                )
                command = self.Command(worker_obj.config, 2, [])
                self._config_commands.append(command)

            if hasattr(worker_obj, "status_update"):
                _LOGGER.debug(
                    "Added %s worker with a %d-second interval and a %d-second timeout",
                    repr(worker_obj),
                    worker_config["update_interval"],
                    worker_obj.command_timeout,
                )
                command = self.Command(
                    worker_obj.status_update, worker_obj.command_timeout, []
                )
                self._update_commands.append(command)

                if "update_interval" in worker_config:
                    job_id = "{}_interval_job".format(worker_name)
                    interval_job = self._scheduler.add_job(
                        partial(self._queue_command, command),
                        "interval",
                        seconds=worker_config["update_interval"],
                        id=job_id,
                    )
                    self._mqtt_callbacks.append(
                        (
                            worker_obj.format_topic("update_interval"),
                            partial(self._update_interval_wrapper, command, job_id),
                        )
                    )
            elif hasattr(worker_obj, "run"):
                _LOGGER.debug("Registered %s as daemon", repr(worker_obj))
                self._daemons.append(worker_obj)
            else:
                raise "%s cannot be initialized, it has to define run or status_update method" % worker_name

            if "topic_subscription" in worker_config:
                self._mqtt_callbacks.append(
                    (
                        worker_config["topic_subscription"],
                        partial(self._on_command_wrapper, worker_obj),
                    )
                )

        if "topic_subscription" in self._config:
            for (callback_name, options) in self._config["topic_subscription"].items():
                self._mqtt_callbacks.append(
                    (
                        options["topic"],
                        # bind the loop variables as default arguments so each
                        # lambda keeps its own callback_name/options pair instead
                        # of all sharing the last iteration's values
                        lambda client, _, c, callback_name=callback_name, options=options: self._queue_if_matching_payload(
                            self.Command(
                                getattr(self, callback_name), self._command_timeout
                            ),
                            c.payload,
                            options["payload"],
                        ),
                    )
                )

        return self

    def start(self, mqtt):
        mqtt.callbacks_subscription(self._mqtt_callbacks)

        if "sensor_config" in self._config:
            self._publish_config(mqtt)

        self._scheduler.start()
        self.update_all()
        for daemon in self._daemons:
            threading.Thread(target=daemon.run, args=[mqtt], daemon=True).start()

    def _queue_if_matching_payload(self, command, payload, expected_payload):
        if payload.decode("utf-8") == expected_payload:
            self._queue_command(command)

    def update_all(self):
        _LOGGER.debug("Updating all workers")
        for command in self._update_commands:
            self._queue_command(command)

    @staticmethod
    def _queue_command(command):
        _WORKERS_QUEUE.put(command)

    @staticmethod
    def _pip_install_helper(package_names):
        for package in package_names:
            pip_main(["install", "-q", package])
        logger.reset()

    def _update_interval_wrapper(self, command, job_id, client, userdata, c):
        _LOGGER.info("Recieved updated interval for %s with: %s", c.topic, c.payload)
        try:
            new_interval = int(c.payload)
            self._scheduler.remove_job(job_id)
            self._scheduler.add_job(
                partial(self._queue_command, command),
                "interval",
                seconds=new_interval,
                id=job_id,
            )
        except ValueError:
            logger.log_exception(
                _LOGGER, "Ignoring invalid new interval: %s", c.payload
            )

    def _on_command_wrapper(self, worker_obj, client, userdata, c):
        _LOGGER.debug(
            "Received command for %s on %s: %s", repr(worker_obj), c.topic, c.payload
        )
        global_topic_prefix = userdata["global_topic_prefix"]
        topic = (
            c.topic[len(global_topic_prefix + "/") :]
            if global_topic_prefix is not None
            else c.topic
        )
        self._queue_command(
            self.Command(
                worker_obj.on_command, worker_obj.command_timeout, [topic, c.payload]
            )
        )

    def _publish_config(self, mqtt):
        for command in self._config_commands:
            messages = command.execute()
            for msg in messages:
                msg.topic = "{}/{}".format(
                    self._config["sensor_config"].get("topic", "homeassistant"),
                    msg.topic,
                )
                msg.retain = self._config["sensor_config"].get("retain", True)
            mqtt.publish(messages)
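
A hedged sketch of the config shape register_workers() consumes; the worker name, args and topics are hypothetical.

config = {
    "command_timeout": 35,
    "sensor_config": {"topic": "homeassistant", "retain": True},
    "workers": {
        "thermometer": {               # imports workers.thermometer, class ThermometerWorker
            "args": {"devices": ["living_room"]},
            "update_interval": 60,     # status_update scheduled every 60 s
            "topic_subscription": "thermometer/set",
        },
    },
}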
示例#34
0
class PeriodicManager(LoggingMixin):
    def __init__(self, mailbox: Mailbox):
        super().__init__()
        self.mailbox = mailbox
        self.sc = BackgroundScheduler()

    def start(self):
        self.sc.start()

    def shutdown(self):
        self.sc.shutdown()

    def _generate_job_id(self, run_id, task_id):
        return '{}:{}'.format(run_id, task_id)

    def add_task(self, run_id, task_id, periodic_config):
        if 'cron' in periodic_config:
            self.sc.add_job(id=self._generate_job_id(run_id, task_id),
                            func=trigger_periodic_task, args=(self.mailbox, run_id, task_id),
                            trigger=CronTrigger.from_crontab(periodic_config['cron']))
        elif 'interval' in periodic_config:
            interval_config: dict = periodic_config['interval']
            # omitted fields default to 0
            seconds = interval_config.get('seconds', 0)
            minutes = interval_config.get('minutes', 0)
            hours = interval_config.get('hours', 0)
            days = interval_config.get('days', 0)
            weeks = interval_config.get('weeks', 0)

            if seconds < 10 and minutes <= 0 and hours <= 0 and days <= 0 and weeks <= 0:
                self.log.error('Interval must be at least 10 seconds')
                return
            self.sc.add_job(id=self._generate_job_id(run_id, task_id),
                            func=trigger_periodic_task, args=(self.mailbox, run_id, task_id),
                            trigger=IntervalTrigger(seconds=seconds,
                                                    minutes=minutes,
                                                    hours=hours,
                                                    days=days,
                                                    weeks=weeks))
        else:
            self.log.error('Periodic config must be of type cron or interval; '
                           'current config: {}'.format(periodic_config))

    def remove_task(self, run_id, task_id):
        self.sc.remove_job(job_id=self._generate_job_id(run_id, task_id))
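
The two periodic_config shapes add_task() accepts, matching the branches above; the ids and timings are illustrative, and a Mailbox instance named mailbox is assumed.

manager = PeriodicManager(mailbox)
manager.add_task('run-1', 'task-a', {'cron': '0 */2 * * *'})       # CronTrigger.from_crontab format
manager.add_task('run-1', 'task-b', {'interval': {'minutes': 5}})  # omitted fields default to 0
manager.remove_task('run-1', 'task-b')                             # job id is 'run-1:task-b'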
示例#35
0
class Scheduler:
    TRIGGERS = {
        "trig_5minutes": {
            "id": "trig_5minutes",
            "name": "Every five minutes",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(minute="*/5"),
            "from_trigger": lambda trig: []
        },
        "trig_hourly": {
            "id": "trig_hourly",
            "name": "Each hour",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(hour="*"),
            "from_trigger": lambda trig: []
        },
        "trig_daily": {
            "id": "trig_daily",
            "name": "Each day",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(day="*"),
            "from_trigger": lambda trig: []
        },
        "trig_weekday": {
            "id":
            "trig_weekday",
            "name":
            "Each weekday",
            "options": [{
                "id": i,
                "name": el,
                "active": True
            } for i, el in enumerate("Mon Tue Wed Thu Fri Sat Sun".split())],
            "schema": {
                "id": {
                    "type": "integer",
                    "coerce": int,
                    "min": 0,
                    "max": 6
                },
                "name": {
                    "type": "string",
                    "required": False
                },
                "active": {
                    "type": "boolean",
                    "coerce": utility.coerce_bool,
                    "required": True
                }
            },
            "trigger_args":
            lambda args: dict(day_of_week=",".join(str(a) for a in args)),
            "from_trigger":
            lambda trig: [int(d) for d in str(trig.fields[4]).split(",")]
        },
        "trig_monthly": {
            "id":
            "trig_monthly",
            "name":
            "Each month",
            "options": [{
                "id": i + 1,
                "name": el,
                "active": True
            } for i, el in enumerate(("Jan Feb Mar Apr May Jun "
                                      "Jul Aug Sep Oct Nov Dec").split())],
            "schema": {
                "id": {
                    "type": "integer",
                    "coerce": int,
                    "min": 0,
                    "max": 12
                },
                "name": {
                    "type": "string",
                    "required": False
                },
                "active": {
                    "type": "boolean",
                    "coerce": utility.coerce_bool,
                    "required": True
                }
            },
            "trigger_args":
            lambda args: dict(month=",".join(str(a) for a in args)),
            "from_trigger":
            lambda trig: [int(d) for d in str(trig.fields[1]).split(",")]
        },
    }
    """Predefined triggers and their argument checks."""
    def __init__(self,
                 elastic,
                 crawler_dir="crawlers",
                 crawler_args={},
                 **cron_defaults):
        """Initializes the scheduler by binding it to it's elasticsearch db.

        Args:
            elastic (elasticsearch.Elasticsearh): The es-client to save the
                crawling jobs in.
            crawler_dir (str): the directory, where the crawlers will be found.
                Defaults to "crawlers".
            job_defaults (dict): a dictionary of keyword arguments for
                the schedulers job_defaults.
            **cron_defaults (dict): a dictionary of keyword arguments for
                the schedulers job_defaults.

        Returns:
            Scheduler: a fresh Scheduler instance.
        """
        jobstores = {
            "default": {
                "type": "memory"
            },
            "elastic": InjectorJobStore(kwargs=crawler_args, client=elastic)
        }

        executors = {
            "default": ThreadPoolExecutor(10),
            "processpool": ProcessPoolExecutor(10)
        }

        job_defaults = {
            "misfire_grace_time": 5 * 60,  # 5min
            "coalesce": True,
        }

        self.cron_defaults = utility.DefaultDict(
            {
                # standard is every day at 00:00:00
                "hour": 0,
                "minute": 0,
                "second": 0
            },
            **cron_defaults)

        self.scheduler = BackgroundScheduler(jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults,
                                             timezone=utc)

        self.crawlers = _detect_crawlers()
        # set up the validator schema.
        self.job_validator = cerberus.Validator(SCHEMATA["job"]({
            "trigger_ids":
            list(self.TRIGGERS)
        }),
                                                allow_unknown=True)
        self.scheduler.start()

    def upsert_job(self, job_dict, **runtime_args):
        """Adds or updates a job using the provided user_input.

        If an id field is present in the dict, the job is updated, otherwise
        a new one is created.

        Args:
            job_dict (dict): user input for a job, as defined in `SCHEMATA`.
            **runtime_args (dict): additional runtime arguments for the
                crawler.

        Returns:
            apscheduler.job.Job: a new job Object.
        """
        if not self.job_validator.validate(job_dict):
            raise AssertionError(str(self.job_validator.errors))

        doc = utility.SDA(job_dict)

        job = self.crawlers.get(doc["crawler.id"], None)
        # default to the SearchPlugin, and give the search name as argument.
        if job is None:
            inst = {
                "args": ("SearchPlugin", runtime_args),
                "kwargs": dict(search_id=doc["crawler.id"])
            }
        else:
            inst = {"args": (doc["crawler.id"], runtime_args), "kwargs": {}}
        trigger = self._make_trigger(doc["schedule"])

        if doc["id"]:
            self.scheduler.modify_job(doc["id"],
                                      jobstore="elastic",
                                      func=_run_plugin,
                                      name=doc["name.name"],
                                      **inst)
            new_job = self.scheduler.reschedule_job(doc["id"],
                                                    jobstore="elastic",
                                                    trigger=trigger)
        else:
            # use the crawler id as name, when the job is created.
            new_job = self.scheduler.add_job(_run_plugin,
                                             jobstore="elastic",
                                             trigger=trigger,
                                             name=doc["crawler.id"],
                                             **inst)

        return new_job

    def get_triggers(self):
        """Returns a list of triggers, that are predefined in the system.

        Returns:
            list: a list of tuples, holding id and name for each trigger.
        """
        return [{
            "id": v["id"],
            "name": v["name"],
            "options": v["options"]
        } for v in self.TRIGGERS.values()]

    def sync_jobs(self, joblist):
        """Synchronize the current jobs with a given list of jobs.

        This means, that all jobs not included in the list will be removed,
        existing ones will be updated and new ones will be added to the
        scheduler.

        Args:
            joblist (list): a list of jobs in the format of the schema.

        Returns:
            bool: whether this operation was successful or not.
        """
        logger.debug("Syncing job lists ...")
        current_jobs = self.get_jobs()
        jobs_to_keep = {j["id"] for j in joblist if j.get("id")}

        # remove old jobs
        for job in current_jobs:
            if job["id"] not in jobs_to_keep:
                self.scheduler.remove_job(job["id"], jobstore="elastic")

        # update and add jobs
        for job in joblist:
            self.upsert_job(job)

        return True

    def _make_trigger(self, trigger_doc):
        """Creates a trigger from a given dictionary of user input."""
        # we can assume, that an id for the trigger is given in the input.
        cur_trigger = self.TRIGGERS[trigger_doc["id"]]
        option_validator = cerberus.Validator(cur_trigger["schema"])

        args = [
            o["id"] for o in trigger_doc["options"]
            if option_validator(o) and o["active"]
        ]

        trigger_args = cur_trigger["trigger_args"](args)
        return CronTrigger(**trigger_args)

    def _serialize_trigger(self, trigger):
        """Serializes a trigger into a json array, as defined in TRIGGERS."""
        # since we only have a defined set of triggers, the following is
        # possible.
        mapping = [(v["trigger_args"]([]).keys(), k)
                   for k, v in self.TRIGGERS.items()]

        trigger_doc = None
        result = {}
        for keys, name in mapping:
            # all keys for the mapping need to be defined.
            def_keys = [f.name for f in trigger.fields if not f.is_default]
            if all([(key in def_keys) for key in keys]):
                trigger_doc = self.TRIGGERS[name]
                break

        if not trigger_doc:
            return result

        result["name"] = trigger_doc["name"]
        result["id"] = trigger_doc["id"]
        args = set(trigger_doc["from_trigger"](trigger))
        # copy the list of options (otherwise this leads to nasty side effects)
        options = [dict(**item) for item in trigger_doc["options"]]
        for option in options:
            option["active"] = option["id"] in args
        result["options"] = options

        return result

    def get_jobs(self):
        """Returns a list of jobs that are scheduled in the system.

        Returns:
            list: a list of job-dicts, holding the id and the runtimes.
        """
        jobs = self.scheduler.get_jobs()
        joblist = []
        for job in jobs:
            joblist.append({
                "id": job.id,
                "name": {
                    "name": job.name
                },
                "crawler": {
                    "id": job.args[0]
                },
                "schedule": self._serialize_trigger(job.trigger),
                "next_run": {
                    "name": job.next_run_time
                }
            })
        logger.debug(f"Retrieved {len(joblist)} jobs from the jobstore.")
        return joblist

    def run_job(self, job_id):
        """Runs the job with the specified id immediately.

        Args:
            job_id: the id of the job that should be run.

        Returns:
            bool: whether running the job succeeded or not.
        """
        logger.debug(f"Running job '{job_id}' directly.")
        cur_job = self.scheduler.get_job(job_id, jobstore="elastic")
        if cur_job is None:
            return False

        cur_job.func(*cur_job.args, **cur_job.kwargs)
        return True
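
A hedged sketch of the job_dict that upsert_job() validates, mirroring the structure get_jobs() returns; the crawler id is hypothetical.

job_dict = {
    # omit "id" to create a new job; include one to modify and reschedule it
    "name": {"name": "nightly run"},
    "crawler": {"id": "my_crawler"},   # unknown ids fall back to the SearchPlugin
    "schedule": {
        "id": "trig_weekday",
        "options": [{"id": 0, "name": "Mon", "active": True},
                    {"id": 4, "name": "Fri", "active": True}],
    },
}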
示例#36
0
class Boss(object):
    workers = {}
    dirpath = '.'
    output = None

    def __init__(self, dirpath='.', output='output'):
        '''
        local path for load config
        '''
        logger.info("Initialing BOSS")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading job config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, using default path instead")
        self.output = output
        logger.info("Setup output folder: " + output)
        if not os.path.isdir(output):
            logger.info("target directory " + output +
                        " doesn't exist, creating..")
            os.makedirs(output)

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

        self.load_dir(dirpath)

        event_handler = JsonHandler(patterns=["*.json"],
                                    ignore_directories=True)
        event_handler.set_handler(oncreated=self.load,
                                  onmodified=self.load,
                                  ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if fname.endswith('.json'):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['path'] = fp
            except ValueError as e:
                logger.warning(fp + " failed to load: " + str(e))
                return None
            interval = 30
            if 'interval' in data:
                interval = int(data['interval'])
            if self.output:
                # TODO: test case for no 'output' key in data
                if 'output' not in data:
                    data['output'] = {}
                output = data['output']
                if 'dirpath' in output:
                    output['dirpath'] = os.path.join(self.output,
                                                     output['dirpath'])
                else:
                    output['dirpath'] = self.output
                if 'type' not in data:
                    logger.error("Missing type attribute in \
                                    your configruation file [%s]" % fp)
                    return None
            if fp in self.workers:  # existing minion found
                logger.info("Update exisitng minion [%s]" % fp)
                minion = self.workers[fp]
                minion.update(**data)
                # //memo: Interval can't be modified
                self.scheduler.modify_job(job_id=fp,
                                          func=minion.collect,
                                          name=minion.name + '_' +
                                          minion.serial)

            else:  # Create new
                logger.info("Create new minion [%s]" % fp)
                module_path = data['type'][:data['type'].rfind(".")]
                object_name = data['type'][data['type'].rfind(".") + 1:]
                try:
                    minion_module = getattr(
                        importlib.import_module(module_path), object_name)
                except Exception as e:
                    logger.exception(e)
                    return None

                minion = minion_module(**data)
                self.workers[fp] = minion
                self.scheduler.add_job(minion.collect,
                                       'interval',
                                       id=fp,
                                       name=minion.name + '_' + minion.serial,
                                       seconds=interval)
            return minion
        return None

    def list(self):
        '''
        to list all configs loaded
        format: [sequence number] [minion name] [config_path] [status]
        '''
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BaseScheduler.pause() pauses the whole scheduler and takes no job id;
        # pause_job() pauses a single job
        self.scheduler.pause_job(fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
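
A hypothetical minion config file for the Boss above; 'type' must be a dotted path to an importable minion class, 'interval' defaults to 30 seconds when omitted, and the remaining fields are illustrative.

{
    "name": "ping",
    "serial": "001",
    "type": "minions.shell.ShellMinion",
    "interval": 60,
    "output": {"dirpath": "ping"}
}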
示例#37
0
class Scheduler(object):
    def __init__(self):
        self.cron_job_args = [
            'year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute',
            'second', 'start_date', 'end_date'
        ]
        self.scheduler = BackgroundScheduler(
            timezone=getattr(config, 'TIME_ZONE', 'Asia/Shanghai'))
        self.jobs = {}
        self.already_init = False

    def init(self):
        if not self.already_init:
            for job in Job.query.filter_by(enabled=True).all():
                self.add_job(job)
            self.scheduler.start()
            self.already_init = True

    def __parse_args(self, trigger, trigger_args):
        if trigger == 'cron':
            args = {
                k: v
                for k, v in zip(self.cron_job_args, trigger_args.split(';'))
                if v
            }
            # Weekday needs special handling: APScheduler uses 0 for Monday,
            # which differs from the description on the page
            day_of_week = int(
                args['day_of_week']) if args.get('day_of_week') else None
            if day_of_week == 0:
                args['day_of_week'] = 6
            elif day_of_week is not None:
                args['day_of_week'] = day_of_week - 1
            return args
        elif trigger == 'interval':
            return {'seconds': int(trigger_args)}
        elif trigger == 'date':
            return {'run_date': trigger_args}
        else:
            raise ValueError('Unknown scheduling trigger: %r' % trigger)

    def add_job(self, job):
        job_id = str(job.id)
        args = self.__parse_args(job.trigger, job.trigger_args)
        instance = self.scheduler.add_job(agent,
                                          job.trigger,
                                          id=job_id,
                                          args=(job.id, job.command_user,
                                                job.command, job.targets),
                                          **args)
        self.jobs[job_id] = instance

    def valid_job_trigger(self, trigger, trigger_args):
        try:
            args = self.__parse_args(trigger, trigger_args)
            job = self.scheduler.add_job(agent,
                                         trigger,
                                         args=(None, None, None, None),
                                         next_run_time=None,
                                         **args)
            job.remove()
            return True
        except ValueError:
            return False

    def remove_job(self, job_id):
        job_id = str(job_id)
        if self.scheduler.get_job(job_id):
            self.scheduler.remove_job(job_id)

    def update_job(self, job):
        job_id = str(job.id)
        if self.scheduler.get_job(job_id):
            args = self.__parse_args(job.trigger, job.trigger_args)
            self.scheduler.reschedule_job(job_id, trigger=job.trigger, **args)
        elif job.enabled:
            self.add_job(job)
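
The trigger_args string formats __parse_args() expects, per the branches above; the values are illustrative.

# cron: semicolon-separated year;month;day;week;day_of_week;hour;minute;second;start_date;end_date
#       empty fields are dropped, e.g. ';;;;1;8;30;;;' -> Mondays at 08:30 (page convention)
# interval: plain seconds, e.g. '3600'
# date: a run_date string, e.g. '2024-01-01 08:00:00'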
示例#38
0
class SchedulerEdge(object):
    # instantiates the object and starts the scheduler

    def __init__(self):
        #self.verifica_sensores()

        def run_thread():
            while(True):
                time.sleep(1)

        self.scheduler = BackgroundScheduler() # assign a background scheduler
        self.scheduler.start() # start the scheduler
        self.th = threading.Thread(target= run_thread) # helper thread runs another flow so the scheduler keeps running
        self.th.start()

        #def verifica_DB():
        #    print("Aqui")
        #self.verifica_sensores()

    def add_job(self, a): # creates a new task in the scheduler
    #  inspect the mode (interval, date, cron) to execute it correctly
        jsonObject = json.loads(a)

        if(jsonObject['modo']=='cron'):
            self.scheduler.add_job(self.tick, jsonObject['modo'], second = jsonObject['info']['second'], minute = jsonObject['info']['minute'],
            hour = jsonObject['info']['hour'], day = jsonObject['info']['day'], month = jsonObject['info']['month'], year = jsonObject['info']['year'],id = a, args = [a])
        #    self.scheduler.add_job(self.tick, jsonObject['modo'], second = 0, minute = jsonObject['info']['minute'],
        #    hour = jsonObject['info']['hour'], day = jsonObject['info']['day'], month = jsonObject['info']['month'], year = jsonObject['info']['year'],id = a, args = [a])


        elif(jsonObject['modo']=='interval'):
            # interval triggers take plural fields (seconds, minutes, hours, days)
            # and have no month/year fields, so those are not forwarded here
            self.scheduler.add_job(self.tick, jsonObject['modo'], seconds = int(jsonObject['info']['second']), minutes = int(jsonObject['info']['minute']),
            hours = int(jsonObject['info']['hour']), days = int(jsonObject['info']['day']), id = a, args = [a])

        elif(jsonObject['modo']=='date'):
            variabledate = datetime.date(int(jsonObject['info']['year']),int(jsonObject['info']['month'])
            ,int(jsonObject['info']['day']))

            # DateTrigger expects run_date; variabledate is already a date object
            self.scheduler.add_job(self.tick, jsonObject['modo'], run_date = variabledate, id = a, args = [a])

    def remove_job(self, a):
        self.scheduler.remove_job(a)

    def tick(self,response):
        object_events = Event_Treatment()
        object_events.event(1,response)
        #print("Pi")
        #print(response)

    def verifica_sensores(self):    # Checks the sensors registered in the DB, keeping them in a table to
        print("Entered")            #       compare against later DB modifications. This makes it possible to
        buffer = BytesIO()          #       add or remove sensors at runtime.
        c = pycurl.Curl()
        c.setopt(c.URL, 'http://localhost:8000/sensors/?format=json')   # endpoint listing all sensors
        c.setopt(c.WRITEDATA, buffer)                                   #       registered in the DB
        c.perform()
        c.close()

        body = buffer.getvalue()
        # Body is a byte string.
        # We have to know the encoding in order to print it to a text file
        # such as standard output.
        jsonObject = json.loads(body.decode('iso-8859-1'))

        i=0
        j=0
        lista_sensores = []
        for row in jsonObject:      # Collect the link for each registered sensor
            print(row['url'])
                                    # Store it in a table for the purpose described above
            ##self.tab_temp[i] = row['url']
            lista_sensores.insert(i,row['url'])
            #lista_sensores.append(row['url'])
            i=i+1

        #print(lista_sensores)
        #teste = lista_sensores[0]
        #print(teste)
        tab_permanente = []

        if len(tab_permanente):             # If the table already holds something, update the devices
            for j in range(i):      #       in T_EVENTO
                print("permanent table exists")              # TODO: write a function to compare the tables || maybe a queue

        else:
            for w in lista_sensores:
                tab_permanente.insert(j,lista_sensores[j])
                # Call a method to return a JSON document.
                print("Json sensor", j)
#-------------------------------------------------------------------------------
                buffer = BytesIO()
                c = pycurl.Curl()
                c.setopt(c.URL, lista_sensores[j])   # fetch this sensor's record
                c.setopt(c.WRITEDATA, buffer)
                c.perform()
                c.close()

                body = buffer.getvalue()
                jsonObject = json.loads(body.decode('iso-8859-1'))
                print(jsonObject)
#-------------------------------------------------------------------------------

                self.add_job(body.decode('iso-8859-1'))
                j=j+1
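
A hedged example of the JSON payload add_job() parses; the field values are illustrative, and the raw JSON string doubles as both the job id and the argument passed to tick().

{"modo": "cron",
 "info": {"second": "0", "minute": "30", "hour": "8",
          "day": "*", "month": "*", "year": "*"}}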
示例#40
0
class Scheduler():
    """ This class handles event adding, removing and listing """
    def __init__(self):
        self.is_running = False
        self.scheduler = BackgroundScheduler()
        self.supported = ['mail', 'slack', 'whatsapp']
        self.counter = 0

    def process_event(self, event):
        """ Used adding jobs to scheduler. """
        event_description = json.loads(event.description)

        message = 'This is default message.'
        if 'message' in event_description:
            message = event_description['message']

        receiver = None
        if 'receiver' in event_description:
            receiver = event_description['receiver']

        subject = None
        if 'subject' in event_description:
            subject = event_description['subject']

        if event_description['how'] not in self.supported:
            raise NameError(
                'Unknown option. Only mail, slack and whatsapp are supported now.'
            )

        id = str(self.counter)

        if event_description['how'] == 'mail':
            self.scheduler.add_job(Mail.send,
                                   next_run_time=event.begin.datetime,
                                   kwargs=dict(message=message,
                                               receiver=receiver,
                                               subject=subject),
                                   id=id,
                                   name=event.name)

        if event_description['how'] == 'slack':
            self.scheduler.add_job(Slack.send,
                                   next_run_time=event.begin.datetime,
                                   kwargs=dict(message=message,
                                               channel=receiver),
                                   id=id,
                                   name=event.name)

        if event_description['how'] == 'whatsapp':
            self.scheduler.add_job(WhatsApp.send,
                                   next_run_time=event.begin.datetime,
                                   kwargs=dict(message=message,
                                               receiver=receiver),
                                   id=id,
                                   name=event.name)

        self.counter += 1

        return id

    def get_events(self):
        """
        Method returns list of scheduled events.
        """
        events = []
        for event in self.scheduler.get_jobs():
            function_name = str(event.func)
            fc = 'Mail'
            if 'WhatsApp' in function_name:
                fc = 'WhatsApp'
            if 'Slack' in function_name:
                fc = 'Slack'
            events.append(
                str('ID: ' + event.id + ', Name: ' + event.__str__() + ' ' +
                    fc + ' ' + str(event.kwargs)))
        # print(events)
        return events

    def run(self):
        """
        Starts scheduler.
        """
        if not self.is_running:
            self.scheduler.start()
        self.is_running = True

    def stop(self):
        """
        Stops scheduler.
        """
        if self.is_running:
            self.scheduler.shutdown()
        self.is_running = False

    def read_calendar(self, file):
        """
        Method will read specified calendar from file in ics format.
        """
        with open(file) as f:
            ics = f.read()
        return Calendar(ics)

    def load_calendar(self, calendar_file):
        """
        Method will add events from calendar to scheduler.
        """
        c = self.read_calendar(calendar_file)

        for event in c.events:
            self.process_event(event)

    @staticmethod
    def create_description(how='mail',
                           receiver=None,
                           message=None,
                           subject=None):
        """
        Method will create a JSON description in the format process_event
        expects.
        """
        d = {'how': how}

        if receiver is not None:
            d['receiver'] = receiver

        if message is not None:
            d['message'] = message

        if subject is not None:
            d['subject'] = subject

        return json.dumps(d)

    @staticmethod
    def create_event(name,
                     when,
                     how='mail',
                     receiver=None,
                     message=None,
                     subject=None):
        """
        Method will create event from specified parameters which can be later added to scheduler.
        """
        e = Event()
        e.name = name
        e.begin = when
        e.description = Scheduler.create_description(how, receiver, message,
                                                     subject)

        return e

    def remove_event(self, event_id):
        """
        Remove event from scheduler specified by his id.
        """
        try:
            self.scheduler.remove_job(str(event_id))
            return True

        except Exception:  # e.g. JobLookupError when the id is unknown
            return False
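
A hedged end-to-end sketch using the helpers above; the channel, message and timestamp are hypothetical.

sched = Scheduler()
sched.run()
event = Scheduler.create_event(name='standup reminder',
                               when='2024-01-01 09:00:00',
                               how='slack',
                               receiver='#team',
                               message='Standup in 5 minutes')
event_id = sched.process_event(event)
print(sched.get_events())
# sched.remove_event(event_id); sched.stop()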
示例#41
0
File: boss.py  Project: zapion/mozMinions
class Boss(object):
    default_path = None
    workers = []

    def __init__(self):
        '''
        local path for load config
        '''
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        if self.default_path:
            self.load(self.default_path)

    def load_dir(self, folder):
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fn in filenames:
            self.load(os.path.join(dirpath, fn))

    def load(self, fp):
        '''
        given a file
        TBI: directory
        '''
        with open(fp) as in_data:
            data = json.load(in_data)
            minion = ShellMinion(**data)
            self.workers.append(minion)
            self.scheduler.add_job(minion.collect, 'interval',
                                   name=minion.name+'_'+minion.serial, seconds=2
                                   )

    def list(self):
        '''
        to list all configs loaded
        format: [sequence number] [minion name] [config_path] [status]
        '''
        for worker in self.workers:
            print(str(worker))

    def remove(self, sn):
        '''
        given an SN, stop running instance if possible
        TODO: remove it from the list
        '''
        self.scheduler.remove_job(sn)

    def remove_advanced(self):
        '''
        TBD
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def stop(self, sn):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BaseScheduler has no stop(); pause_job pauses the job without removing its config
        self.scheduler.pause_job(sn)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
示例#42
0
class BotBase:
    def __init__(self, chatter: ChatterBase):
        self.session = Session()
        self.subscribers = {}

        # I never expect to have any significant amount of users, so I'll just load all in memory on start
        self.load_subscribers()

        self.chatter = chatter
        self.chatter.set_new_message_event(self.new_message_event)

        self.scheduler = BackgroundScheduler()
        self.schedule_jobs()

    def load_subscribers(self):
        for row in self.session.query(Subscriber):
            s: Subscriber = row
            self.subscribers[s.user_id] = {
                'repeat': s.repeat,
                'sleep_from': s.sleep_from,
                'sleep_to': s.sleep_to
            }

    # bot_base has essential commands: subscribe/unsubscribe/sleep
    def user_subscribe(self, user_id, period):
        session = self.session
        subscriber = session.query(Subscriber).filter_by(
            user_id=user_id).first()

        if subscriber:
            subscriber.repeat = period
            self.scheduler.remove_job(user_id)
        else:
            subscriber = Subscriber(user_id=user_id, repeat=period)

        session.add(subscriber)
        session.commit()

        self.scheduler.add_job(self.send_to_periodic_subscribers,
                               'interval',
                               minutes=period,
                               args=[user_id],
                               id=user_id)
        self.subscribers[user_id] = {
            'repeat': period,
            'sleep_from': 0,
            'sleep_to': 0
        }

    def user_unsubscribe(self, user_id):
        session = self.session
        subscriber = session.query(Subscriber).filter(
            Subscriber.user_id == user_id).first()

        if subscriber:
            self.scheduler.remove_job(user_id)

        session.query(Subscriber).filter(
            Subscriber.user_id == user_id).delete()
        session.commit()

        if user_id in self.subscribers.keys():
            del self.subscribers[user_id]

    def user_sleep(self, user_id, sleep_from, sleep_to):
        session = self.session
        subscriber = session.query(Subscriber).filter(
            Subscriber.user_id == user_id).first()
        subscriber.sleep_from = sleep_from
        subscriber.sleep_to = sleep_to

        session.add(subscriber)
        session.commit()

        self.subscribers[user_id]['sleep_from'] = sleep_from
        self.subscribers[user_id]['sleep_to'] = sleep_to

        print('sleep configured for {}'.format(user_id))

    def user_get_subscription(self, user_id):
        session = self.session
        subscriber = session.query(Subscriber).filter(
            Subscriber.user_id == user_id).first()

        return subscriber.repeat if subscriber else 0

    def schedule_jobs(self):
        print('scheduling jobs...')
        for key, value in self.subscribers.items():
            self.scheduler.add_job(self.send_to_periodic_subscribers,
                                   'interval',
                                   minutes=value['repeat'],
                                   args=[key],
                                   id=key)
            print('scheduling {} every {}'.format(key, value['repeat']))

        self.scheduler.start()

    def is_sleep_time(self, user_id):
        subscriber_settings: dict = self.subscribers.get(user_id)
        sleep_from = subscriber_settings.get('sleep_from', 0)
        sleep_to = subscriber_settings.get('sleep_to', 0)

        if not sleep_from and not sleep_to:
            return False

        sleep_from = time(sleep_from, 0, 0)
        sleep_to = time(sleep_to, 0, 0)

        cur_time = datetime.now().time()

        def time_in_range(start, end, x):
            if start <= end:
                return start <= x <= end
            else:
                return start <= x or x <= end

        if time_in_range(sleep_from, sleep_to, cur_time):
            return True
        else:
            return False

    def get_next_run_time(self, user_id):
        t = self.scheduler.get_job(user_id).next_run_time
        t = t.strftime('%H:%M:%S')
        return t

    def get_sleep_hours(self, user_id):
        subscriber = self.session.query(Subscriber).filter(
            Subscriber.user_id == user_id).first()

        sleep_from = subscriber.sleep_from
        sleep_to = subscriber.sleep_to
        return sleep_from, sleep_to

    # respond or distribute message to all subscribers
    def say(self, message, chat_id, bold=False, mono=False, colour=''):
        self.chatter.send_message(message, chat_id, bold, colour, mono)

    # classes can implement it
    def new_message_event(self, message_text, message_source, chat_id):
        pass

    def start_listening(self):  # start bot's event loop
        while True:
            self.chatter.cycle()
        # self.chatter.start_listening()

    def send_to_periodic_subscribers(self, chat_id):
        if self.is_sleep_time(chat_id):
            raise SleepTimeException
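BotBase leaves new_message_event as a hook and expects subclasses to extend send_to_periodic_subscribers; a minimal subclass might look like this sketch (EchoBot and its texts are assumptions):

# Sketch of a concrete bot; only BotBase comes from the code above.
class EchoBot(BotBase):
    def new_message_event(self, message_text, message_source, chat_id):
        self.say(message_text, chat_id)  # echo every incoming message

    def send_to_periodic_subscribers(self, chat_id):
        # the base class raises SleepTimeException during sleep hours
        super().send_to_periodic_subscribers(chat_id)
        self.say('periodic update', chat_id)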
Example #43
0
class MainRunner(object):
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    class NoRunningFilter(logging.Filter):
        def filter(self, record):
            return not record.msg.startswith('Execution')

    def __init__(self, dirpath='.'):
        '''
        local path to load config from
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

        my_filter = self.NoRunningFilter()
        logging.getLogger("apscheduler.scheduler").addFilter(my_filter)

    def load_dir(self, folder):
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if 'json' in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open("agent.log", 'w+') as f:
            f.write(fp + " was loaded!")
        data = {}
        loaded = False
        for _ in range(10):
            try:
                with open(fp) as in_data:
                    data = json.load(in_data)
                    # default will load JOB_NAME parameter in Jenkins created json file
                    data['name'] = data.get('JOB_NAME', "Jenkins Job")
                    data['path'] = fp
                    loaded = True
                    break
            except ValueError as e:
                logger.warning(fp + " loaded failed: " + e.message)
                return None
            except Exception as e:
                logger.warning("File is not ready. Wait 1 second for another try.")
                time.sleep(1)

        if not loaded:
            logger.warning(fp + " was still not ready after 10 attempts.")
            return None

        # load interval value from Jenkins created json file (default : 30 )
        interval = int(data.get('interval', 30))

        # load outputpath and defaultoutputpath from Jenkins created json file
        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # memo: the job interval can't be modified here
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name
                                      )

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = data.get('AGENT_MODULE_PATH', "hasalTask")
            object_name = data.get('AGENT_OBJECT_NAME', "HasalTask")
            try:
                runner_module = getattr(importlib.import_module(
                                        module_path), object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run, 'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval
                                   )
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [sequence number] [minion name] [config_path] [status]
        '''
        for fp, worker in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        self.scheduler.pause_job(fp)  # pause() would pause the whole scheduler

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
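A usage sketch for this runner; the config directory and file names are illustrative, and the watchdog observer picks up later file changes on its own:

# Usage sketch -- paths are illustrative.
runner = MainRunner(dirpath='configs')  # loads configs/*.json and starts watching
runner.pause('configs/smoke.json')      # pause the job keyed by its file path
runner.remove('configs/smoke.json')     # stop it and drop the config
runner.unload_all()                     # shut the scheduler down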
Example #44
0
class Jobs_Scheduler():
    
    __lock_sending_to_msg_q = threading.Lock()
    
    def __init__(self):
        
        self.__scheduler = None
        self.__init_scheduler()
    
    def __init_scheduler(self):
        try:
            
            self.__scheduler = BackgroundScheduler()
            self.__load_scheduled_jobs()
            
        except Exception as e:
            Logger.log_debug('ERROR @ Job_Scheduler -> __init_scheduler')
            Logger.log_error(str(e))
        finally:
            Logger.log_info('Done with initializing job scheduler service')
        
    def start_scheduler(self):
        self.__scheduler.start()
    
    def stop_scheduler(self):
        self.__scheduler.shutdown()  # BackgroundScheduler has no stop(); shutdown instead
    
    def __load_scheduled_jobs(self):

        try:
            file_path = cfg.SCHEDULED_JOBS_FILES_PATH

            for file in glob.glob(file_path + "*.shed"):

                dict_msg = CF.load_dictionary_object(file)

                jobid = str(dict_msg["cronjobid"])
                m = str(dict_msg["cronminute"])
                h = dict_msg["cronhour"]
                dow = dict_msg["crondateofweek"]

                self.schedule_a_job(jobid, m, h, dow, None)

        except Exception as e:
            Logger.log_debug("ERROR @ load_scheduled_jobs")
            Logger.log_error(str(e))
    
    def schedule_a_job(self, cronjobid, m="0",h='*',dow='*', message_dict=None):

        #-----------------------------------------
        # jobid format :- <device_com_pk><hub_pk>
        #-----------------------------------------
        ret_val = False
        #Jobs_Scheduler.__job_lock.acquire()
        try:

            if m:  # the minute field must be present (it may be "*" or a number)

                #dict_obj={"jobid":jobid,"m":m,"h":h,"dow":dow}

                ret_val = True
                if message_dict is not None:
                    
                    file_name = '%s%s%s' % (cfg.SCHEDULED_JOBS_FILES_PATH, cronjobid , ".shed")
                    ret_val = CF.save_dictionary_object(file_name, message_dict)

                if ret_val:
                    #print 'adding a schedule'
                    Logger.log_info('Adding scheduler job ' + str(cronjobid))
                    self.__scheduler.add_job(Jobs_Scheduler.scheduler_callback_function
                                                       ,minute=m,hour=h,day_of_week=dow, id=cronjobid,trigger="cron",kwargs={"jobid":cronjobid})

            return ret_val
        except Exception as e:
            Logger.log_debug("ERROR @ schedule_a_job")
            Logger.log_error(str(e))
            return False
        finally:
            #Jobs_Scheduler.__job_lock.release()
            pass
        
    def remove_a_scheduled_job(self, cronjobid):
        #Jobs_Scheduler.__job_lock.acquire()
        try:

            Logger.log_debug("Removing scheduled job " + cronjobid)
            self.__scheduler.remove_job(cronjobid)
            file_name = cfg.SCHEDULED_JOBS_FILES_PATH + cronjobid + ".shed"
            os.remove(file_name)

            return (True,"")

        except Exception as e:
            Logger.log_debug("ERROR @ remove_a_scheduled_job")
            Logger.log_error(str(e))
            return (False, str(e))
        finally:
            #Jobs_Scheduler.__job_lock.release()
            pass
        
        
    def print_jobs(self):
        try:
            self.__scheduler.print_jobs()
        
        except Exception as e:
            Logger.log_debug("ERROR @ print_jobs")
            Logger.log_error(str(e))
            
    @staticmethod
    def scheduler_callback_function(jobid):

        Jobs_Scheduler.__lock_sending_to_msg_q.acquire()
        try:
            
            jobid = str(jobid)
            
            Logger.log_info("Scheduler triggered for job "+ jobid)
            file_path = cfg.SCHEDULED_JOBS_FILES_PATH
            file_name = file_path + jobid + ".shed"

            dict_msg = CF.load_dictionary_object(file_name)

            if dict_msg is not None:

                # Send the message to MessageProc q
                result_msg = Jobs_Scheduler.send_job_command(dict_msg)
                
        except Exception as e:
            Logger.log_debug("ERROR @ scheduler_callback_function")
            Logger.log_error(str(e))

        finally:
            Logger.log_debug("End of scheduler_callback_function")
            Jobs_Scheduler.__lock_sending_to_msg_q.release()
            
            
    @staticmethod
    def send_job_command(dict_command):
        try:
            
            cronjobid = dict_command['cronjobid']
            device_code = dict_command['devicecode']
            com_id = dict_command['comid']
            hubcondeviceid = dict_command['hubcondeviceid']
            comcode = dict_command['comcode']
            
            dict_command.update({'u_o_s': 'S'})
            
            zc = ZC()
            mp_sock = zc.connect_zmq(cfg.msg_proc_ip, cfg.msg_proc_port, 15000)
            mp_sock.send(json.dumps(dict_command))
            
            ret_msg = None
            ret_msg = zc.zmq_recv(mp_sock, 15000)
            mp_sock.close()
            del mp_sock

            if ret_msg is not None:
                
                ret_msg = json.loads(ret_msg)
                # Send to notification Q
                
                hub_sno = CF.get_hub_serial_no()
                ret_msg.update({'hub_sno': hub_sno})
                ret_msg.update({'notification_type': 'CRON_EVENT'})
                ret_msg.update({'msg_type' :'NOTIFICATION'})
                ret_msg.update({'notification_dt': datetime.datetime.now().strftime("%Y%m%d %H%M%S")})
                ret_msg.update({'cronjobid': cronjobid})
                ret_msg.update({'devicecode': device_code})
                ret_msg.update({'hubcondeviceid': hubcondeviceid})
                ret_msg.update({'comcode': comcode})
                
                Logger.log_debug('Sending ack to notification queue')
                CF.send_to_notifiction_queue(ret_msg)
            else:
                Logger.log_error('No response back from the message processing queue for the scheduled event.')
            
        except Exception as e:
            Logger.log_debug("ERROR @ Jobs_Scheduler -> send_job_command")
            Logger.log_error(str(e))
    
    """
    @staticmethod
    def send_to_notifiction_queue(dict_msg):
        try:
            zc = ZC()
            en_sock = zc.connect_zmq(cfg.EVENTS_PROCESSOR_IP, cfg.EVENTS_PROCESSOR_PORT)
            en_sock.send(json.dumps(dict_msg))
            
            ret_msg = None
            ret_msg = zc.zmq_recv(en_sock)
            en_sock.close()
            del en_sock
            
            if ret_msg == None:
                return False
            else:
                return True
            
        except Exception as e:
            Logger.log_debug("ERROR @ Jobs_Scheduler -> send_to_notifiction_queue()")
            Logger.log_error(str(e))
            return False
    """
    
    
    
Example #45
0
class Trigger:

    def __init__(self, app):
        self.scheduler = None
        self.app = app

    def setup(self):
        self.scheduler = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'type': 'sqlalchemy',
                'url': self.app.config["TRIGGER_DATABASE_URL"] #os.environ.get('TRIGGER_DATABASE_URL')
            },
            'apscheduler.executors.processpool': {
                'type': 'processpool',
                'max_workers': '30'
            },
            'apscheduler.job_defaults.coalesce': 'false',
            'apscheduler.job_defaults.max_instances': '20',
            'apscheduler.timezone': 'UTC',
        })

    def start(self):

        self.scheduler.start()

    def is_running(self):
        return self.scheduler.running  # `running` is a property, not a method

    def shutdown(self):
        self.scheduler.shutdown()

    def load_job_list(self):
        with self.app.app_context():
            projects = AutoProject.query.all()
            # key_list = ("minute", "hour", "day", "month", "day_of_week")

            for p in projects:
                if p.enable and self.scheduler.get_job(p.id) is None:
                    cron = p.cron.replace("\n", "").strip().split(" ")
                    #print(cron)
                    if len(cron) < 5:
                        continue
                    j = self.scheduler.add_job(func=run_job, trigger='cron', name=p.name, replace_existing=True,
                                               minute=cron[0], hour=cron[1], day=cron[2], month=cron[3], day_of_week=cron[4],
                                               id="%s" % p.id, args=(p.id,))
                else:
                    self.update_job(p.id)

    def update_job(self, id):
        with self.app.app_context():
            p = AutoProject.query.filter_by(id=id).first()
            if p.enable:
                cron = p.cron.replace("\n", "").strip().split(" ")
                if len(cron) < 5:
                    return False
                print(self.scheduler.get_job(id))

                if self.scheduler.get_job(id) is None:
                    self.scheduler.add_job(func=run_job, trigger='cron', name=p.name,
                                           minute=cron[0], hour=cron[1], day=cron[2], month=cron[3], day_of_week=cron[4],
                                           id="%s" % id, args=(id,))
                else:
                    self.remove_job(id)

                    self.scheduler.add_job(func=run_job, trigger='cron', name=p.name,
                                           minute=cron[0], hour=cron[1], day=cron[2], month=cron[3], day_of_week=cron[4],
                                           id="%s" % id, args=(id,))

            return True

    def remove_job(self, id):
        if self.scheduler.get_job(id) is not None:
            self.scheduler.remove_job(id)

    def pause_job(self, id):
        pass

    def resume_job(self, id):
        pass

    def get_jobs(self):
        to_zone = tz.gettz("CST")
        #jobs = self.scheduler.get_jobs()
        urls = {
            "pass": "******",
            "fail": "fail.png",
            "running": "run.gif",
            "none": "project.png"
        }
        projects = AutoProject.query.order_by(AutoProject.id.desc()).all()
        data = {"total": len(projects), "rows": []}

        for p in projects:
            next_run_time = "scheduler not started"
            status = "running"  # default: job exists but no finished report yet
            job = self.scheduler.get_job(p.id)
            if job is not None:
                next_run_time = job.next_run_time.astimezone(to_zone).strftime("%Y-%m-%d %H:%M:%S")

                # fetch the last run status of this job
                task = AutoTask.query.filter_by(project_id=job.id).order_by(AutoTask.build_no.desc()).first()
                if task is not None:
                    output_dir = os.getcwd() + "/logs/%s/%s" % (task.project_id, task.build_no)
                    if os.path.exists(output_dir + "/report.html"):
                        tree = ET.parse(output_dir + "/output.xml")
                        root = tree.getroot()
                        # passed = root.find("./statistics/suite/stat").attrib["pass"]
                        fail = root.find("./statistics/suite/stat").attrib["fail"]
                        if int(fail) != 0:
                            status = 'fail'
                        else:
                            status = 'pass'
            else:
                status = "none"

            data["rows"].append({"id": "%s" % p.id,
                                 "name": p.name,
                                 "enable": p.enable,
                                 "status": status,
                                 "url": url_for('static', filename='images/%s' % urls[status]),
                                 "cron": p.cron,
                                 "next_run_time": next_run_time
                                 })

        """
        for job in jobs:
            status = "running"
            task = AutoTask.query.filter_by(project_id=job.id).order_by(AutoTask.build_no.desc()).first()
            if task is None:
                continue

            output_dir = os.getcwd() + "/logs/%s/%s" % (task.project_id, task.build_no)
            if os.path.exists(output_dir + "/report.html"):
                tree = ET.parse(output_dir + "/output.xml")
                root = tree.getroot()
                #passed = root.find("./statistics/suite/stat").attrib["pass"]
                fail = root.find("./statistics/suite/stat").attrib["fail"]
                if int(fail) != 0:
                    status = 'fail'
                else:
                    status = 'pass'

            data["rows"].append({"id": "%s" % job.id,
                                 "name": job.name,
                                 "status": status,
                                 "url": url_for('static', filename='images/%s' % urls[status]),
                                 "cron": AutoProject.query.filter_by(id=job.id).first().cron,
                                 "next_run_time": job.next_run_time.astimezone(to_zone).strftime("%Y-%m-%d %H:%M:%S")
                                 })
        """

        return data

    def print_jobs(self):
        pass
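Wiring the trigger into a Flask app might look like the sketch below; the app object and database URL are assumptions consistent with the config key read in setup():

# Usage sketch -- `app` is a Flask application; the URL is illustrative.
app.config["TRIGGER_DATABASE_URL"] = "sqlite:///trigger_jobs.db"
trigger = Trigger(app)
trigger.setup()          # SQLAlchemy job store + process-pool executor
trigger.start()
trigger.load_job_list()  # schedules every enabled AutoProject from its cron field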
Example #46
0
class MainRunner(object):

    class FilterAllLog(logging.Filter):
        # default we will filter logger from apscheduler.executors.default, apscheduler.scheduler,
        # you can config filter logger in config.json
        def filter(self, record):
            return ""

    def __init__(self, input_cmd_config_fp, input_job_config_fp, input_config_fp):

        # init value
        cmd_config_fp = os.path.abspath(input_cmd_config_fp)
        job_config_fp = os.path.abspath(input_job_config_fp)
        config_fp = os.path.abspath(input_config_fp)

        # load configuration json files
        self.cmd_config = CommonUtil.load_json_file(cmd_config_fp)
        self.job_config = CommonUtil.load_json_file(job_config_fp)
        self.config = CommonUtil.load_json_file(config_fp)

        # init schedulers
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore('sqlalchemy', url=self.config['job_store_url'])
        self.scheduler.start()

        # init variables
        manager = Manager()
        self.sync_queue = manager.Queue()
        self.async_queue = manager.Queue()
        self.current_job_list = []

        # Slack Sending Queue
        # TODO: if the Slack bot is disabled, the sending queue may use too much memory.
        self.slack_sending_queue = manager.Queue(50)

        # init logger
        self.set_logging(self.config['log_level'], self.config['log_filter'])

    def set_logging(self, log_level, log_filter_list):
        default_log_format = '%(asctime)s %(levelname)s [%(name)s.%(funcName)s] %(message)s'
        default_datefmt = '%Y-%m-%d %H:%M'
        if log_level.lower() == "debug":
            logging.basicConfig(level=logging.DEBUG, format=default_log_format, datefmt=default_datefmt)
        else:
            logging.basicConfig(level=logging.INFO, format=default_log_format, datefmt=default_datefmt)

        my_filter = self.FilterAllLog()
        for target_logger in log_filter_list:
            logging.getLogger(target_logger).addFilter(my_filter)

    def scheduler_del_job(self, **kwargs):
        input_cmd_str = kwargs.get("input_cmd_str", "")
        cmd_str_list = input_cmd_str.split(" ")
        if len(cmd_str_list) == 2:
            job_id = cmd_str_list[1]
            current_job_list = self.scheduler.get_jobs()
            current_job_id_list = [j.id for j in current_job_list]
            if job_id in current_job_id_list:
                self.scheduler.remove_job(job_id)
            else:
                logging.error("Cannot find the specify job id [%s]" % job_id)
        else:
            logging.error("Incorrect cmd format! [%s]" % input_cmd_str)

    def scheduler_list_job(self, **kwargs):
        self.scheduler.print_jobs()

    def scheduler_shutdown(self, **kwargs):
        self.scheduler.shutdown()
        sys.exit(0)

    def list_all_commands(self, **kwargs):
        print "Current supported commands as below:"
        print "-" * 80
        for cmd_str in self.cmd_config['cmd-settings']:
            print '{:30s} {:50s} '.format(cmd_str, self.cmd_config['cmd-settings'][cmd_str]['desc'])
        print "-" * 80

    def scheduler_job_handler(self, input_cmd_obj, input_cmd_str):
        cmd_match_pattern = input_cmd_obj.keys()[0]
        func_point = getattr(self, input_cmd_obj[cmd_match_pattern]['func-name'])
        func_point(cmd_configs=input_cmd_obj[cmd_match_pattern]['configs'], input_cmd_str=input_cmd_str)

    def cmd_queue_composer(self, input_cmd_str):
        for cmd_pattern in self.cmd_config['cmd-settings']:
            re_compile_obj = re.compile(cmd_pattern)
            re_match_obj = re_compile_obj.search(input_cmd_str)
            if re_match_obj:
                current_command_obj = self.cmd_config['cmd-settings'][cmd_pattern]
                logging.debug("job matched [%s]" % cmd_pattern)
                target_queue_type = current_command_obj.get('queue-type', None)
                if target_queue_type == "async":
                    self.async_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                elif target_queue_type == "sync":
                    self.sync_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                else:
                    self.scheduler_job_handler({cmd_pattern: current_command_obj}, input_cmd_str)
                break

    def load_default_jobs(self, input_scheduler, input_job_config):
        current_jobs = input_scheduler.get_jobs()
        current_jobs_name = [job.name for job in current_jobs]
        for job_name in input_job_config:
            if input_job_config[job_name]['default-loaded']:
                if job_name not in current_jobs_name:
                    func_point = getattr(importlib.import_module(input_job_config[job_name]['module-path']), job_name)
                    self.scheduler.add_job(func_point, input_job_config[job_name]['trigger-type'],
                                           id=job_name,
                                           seconds=input_job_config[job_name]['interval'],
                                           max_instances=input_job_config[job_name]['max-instances'],
                                           kwargs={
                                               'async_queue': self.async_queue,
                                               'sync_queue': self.sync_queue,
                                               'slack_sending_queue': self.slack_sending_queue,
                                               'configs': input_job_config[job_name]['configs'],
                                               'cmd_config': self.cmd_config}
                                           )

    def job_exception_listener(self, event):
        if event.exception:
            logging.error("Job [%s] crashed [%s]" % (event.job_id, event.exception))
            logging.error(event.traceback)

    def add_event_listener(self):
        self.scheduler.add_listener(self.job_exception_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    def run(self):
        # load default jobs into the scheduler if they are not there yet
        self.load_default_jobs(self.scheduler, self.job_config)

        # add event listener into scheduler
        self.add_event_listener()

        # enter the loop to receive the interactive command
        while True:
            user_input = raw_input()
            self.cmd_queue_composer(user_input)
            time.sleep(3)
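The cmd-settings structure this runner consumes is only implied by cmd_queue_composer() and scheduler_job_handler(); a minimal sketch of it, with queue-type omitted so commands run inline, might be:

# Sketch of the cmd-settings structure (keys inferred from the code above).
cmd_config = {
    "cmd-settings": {
        "^del-job": {"func-name": "scheduler_del_job",
                     "desc": "remove a scheduled job by id",
                     "configs": {}},
        "^list-job": {"func-name": "scheduler_list_job",
                      "desc": "list current scheduled jobs",
                      "configs": {}},
    }
}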
Example #47
0
class APSModule(object):
    """Handles post Python-module execution requests; runs as a singleton.
    EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
    EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES
    """
    _instance_lock = threading.Lock()

    def __init__(self):
        self.ModuleJobsScheduler = BackgroundScheduler()
        self.ModuleJobsScheduler.add_listener(self.deal_result)
        self.ModuleJobsScheduler.start()

    def __new__(cls, *args, **kwargs):
        if not hasattr(APSModule, "_instance"):
            with APSModule._instance_lock:
                if not hasattr(APSModule, "_instance"):
                    APSModule._instance = object.__new__(cls)
        return APSModule._instance

    def putin_post_python_module_queue(self, post_module_intent=None):
        try:
            # store the task uuid
            tmp_self_uuid = str(uuid.uuid1())

            # clear previous log history
            post_module_intent._clean_log()

            logger.warning("Module queued: {} job_id: {} uuid: {}".format(
                post_module_intent.NAME, None, tmp_self_uuid))
            post_module_intent.module_self_uuid = tmp_self_uuid
            self.ModuleJobsScheduler.add_job(
                func=post_module_intent._thread_run,
                max_instances=1,
                id=tmp_self_uuid)

            # put into the cache queue for later task removal, result storage, etc.
            req = {
                'broker': post_module_intent.MODULE_BROKER,
                'uuid': tmp_self_uuid,
                'module': post_module_intent,
                'time': int(time.time()),
                'job_id': None,
            }
            Xcache.create_module_task(req)
            Notice.send_info("Module: {} {} started".format(
                post_module_intent.NAME, post_module_intent._target_str))
            return True
        except Exception as E:
            logger.error(E)
            return False

    def deal_result(self, event=None):
        flag = False
        if event.code == EVENT_JOB_ADDED:
            # print("EVENT_JOB_ADDED")
            pass
        elif event.code == EVENT_JOB_REMOVED:
            # print("EVENT_JOB_REMOVED")
            pass
        elif event.code == EVENT_JOB_MODIFIED:
            # print("EVENT_JOB_MODIFIED")
            pass
        elif event.code == EVENT_JOB_EXECUTED:  # finished successfully
            flag = self.store_executed_result(event.job_id)
        elif event.code == EVENT_JOB_ERROR:
            # print("EVENT_JOB_ERROR")
            flag = self.store_error_result(event.job_id, event.exception)
        elif event.code == EVENT_JOB_MISSED:
            # print("EVENT_JOB_MISSED")
            pass
        elif event.code == EVENT_JOB_SUBMITTED:
            # print("EVENT_JOB_SUBMITTED")
            pass
        elif event.code == EVENT_JOB_MAX_INSTANCES:
            # print("EVENT_JOB_MAX_INSTANCES")
            pass
        else:
            pass
        return flag

    @staticmethod
    def store_executed_result(task_uuid=None):
        req = Xcache.get_module_task_by_uuid(task_uuid=task_uuid)
        if req is None:
            logger.warning("No matching instance in cache; the module may have exited early")
            return False
        module_common_instance = req.get("module")

        # store the run result
        try:
            module_common_instance._store_result_in_history()
            Notice.send_success("Module: {} {} finished".format(
                module_common_instance.NAME,
                module_common_instance._target_str))
            logger.warning("Module instance finished: {}".format(module_common_instance.NAME))
            Xcache.del_module_task_by_uuid(task_uuid=task_uuid)  # clean up the cache entry
            return True
        except Exception as E:
            Xcache.del_module_task_by_uuid(task_uuid=task_uuid)  # clean up the cache entry
            logger.error("Module instance raised an exception: {} details: {}".format(
                module_common_instance.NAME, E))
            Notice.send_exception("Module: {} raised an exception: {}".format(
                module_common_instance.NAME, E))
            logger.error(E)
            return False

    @staticmethod
    def store_error_result(task_uuid=None, exception=None):
        req = Xcache.get_module_task_by_uuid(task_uuid=task_uuid)
        Xcache.del_module_task_by_uuid(task_uuid=task_uuid)  # clean up the cache entry
        module_common_instance = req.get("module")

        # store the run result
        try:
            module_common_instance.log_except(exception)
            module_common_instance._store_result_in_history()
            logger.error("Module instance raised an exception: {} details: {}".format(
                module_common_instance.NAME, exception))
            Notice.send_exception("Module: {} raised an exception: {}".format(
                module_common_instance.NAME, exception))
            return True
        except Exception as E:
            logger.error("Module instance raised an exception: {} details: {}".format(
                module_common_instance.NAME, E))
            Notice.send_exception("Module: {} raised an exception: {}".format(
                module_common_instance.NAME, E))
            logger.error(E)
            return False

    def delete_job_by_uuid(self, task_uuid=None):
        req = Xcache.get_module_task_by_uuid(task_uuid=task_uuid)
        Xcache.del_module_task_by_uuid(task_uuid=task_uuid)  # clean up the cache entry

        # remove the background job
        try:
            self.ModuleJobsScheduler.remove_job(task_uuid)
        except Exception as E:
            logger.error(E)

        try:
            module_common_instance = req.get("module")
        except Exception as E:
            logger.error(E)
            return False

        # store whatever results were produced so far
        try:
            module_common_instance.log_info("Task deleted manually by user")
            module_common_instance._store_result_in_history()
        except Exception as E:
            logger.error("Exception while deleting module instance: {} details: {}".format(
                module_common_instance.NAME, E))
            Notice.send_exception("Module: {} raised an exception: {}".format(
                module_common_instance.NAME, E))
            logger.error(E)
            return False

        # send a notification
        Notice.send_info("Module: {} {} deleted manually".format(
            module_common_instance.NAME, module_common_instance._target_str))
        logger.warning("Module instance deleted manually: {}".format(module_common_instance.NAME))
        return True
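A sketch of driving the singleton; demo_module stands in for a real post-module instance carrying the attributes the queue reads (NAME, MODULE_BROKER, _target_str, _clean_log, _thread_run):

# Usage sketch -- demo_module is a stand-in object, not part of the code above.
aps = APSModule()  # __new__ guarantees a single shared instance
aps.putin_post_python_module_queue(demo_module)        # run _thread_run in background
aps.delete_job_by_uuid(demo_module.module_self_uuid)   # cancel and keep partial results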
Example #48
0
File: agent.py  Project: ypwalter/Hasal
class MainRunner(object):
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    def __init__(self, dirpath='.'):
        '''
        local path to load config from
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if 'json' in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['name'] = "Jenkins Job"
                data['path'] = fp
            except ValueError as e:
                logger.warning(fp + " loaded failed: " + e.message)
                return None
        interval = 30
        if 'interval' in data:
            interval = int(data['interval'])

        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # memo: the job interval can't be modified here
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name
                                      )

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = "hasalTask"
            object_name = "HasalTask"
            try:
                runner_module = getattr(importlib.import_module(
                                        module_path), object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run, 'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval
                                   )
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [sequence number] [minion name] [config_path] [status]
        '''
        for fp, worker in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        self.scheduler.pause_job(fp)  # pause() would pause the whole scheduler

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
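The task files this agent consumes are plain JSON picked up by the watchdog observer; a sketch of producing one, with only the fields load() above actually reads (HasalTask may require more):

# Sketch: write a minimal task file for load() to pick up.
import json

task = {"interval": 60,
        "output": {"defaultOutputPath": "output", "dirpath": "smoke"}}
with open('configs/smoke.json', 'w') as f:
    json.dump(task, f)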
Example #49
0
class MetricsTaskManager(manager.Manager):
    """manage periodical tasks"""

    RPC_API_VERSION = '1.0'

    def __init__(self, service_name=None, *args, **kwargs):
        super(MetricsTaskManager, self).__init__(*args, **kwargs)
        scheduler = schedule_manager.SchedulerManager()
        scheduler.start()
        partitioner = ConsistentHashing()
        partitioner.start()
        partitioner.join_group()
        self.watch_job_id = None
        self.cleanup_job_id = None
        self.group = None
        self.watcher = None
        self.scheduler = None
        self.rpcapi = rpcapi.SubprocessAPI()
        self.executor_map = {}
        self.enable_sub_process = CONF.telemetry.enable_dynamic_subprocess
        if self.enable_sub_process:
            self.scheduler = BackgroundScheduler()
            self.scheduler.start()
        self.schedule_boot_jobs(self.host)

    def assign_job(self, context, task_id, executor):
        if not self.enable_sub_process:
            instance = JobHandler.get_instance(context, task_id)
            instance.schedule_job(task_id)
        else:
            if not self.watch_job_id:
                self.init_watchers(executor)
            local_executor = self.get_local_executor(context, task_id, None,
                                                     executor)
            self.rpcapi.assign_job_local(context, task_id, local_executor)

    def remove_job(self, context, task_id, executor):
        if not self.enable_sub_process:
            instance = JobHandler.get_instance(context, task_id)
            instance.remove_job(task_id)
        else:
            job = db.task_get(context, task_id)
            storage_id = job['storage_id']
            for name in self.executor_map.keys():
                if storage_id in self.executor_map[name]["storages"]:
                    local_executor = "{0}:{1}".format(executor, name)
                    self.rpcapi.remove_job_local(context, task_id,
                                                 local_executor)
                    tasks, failed_tasks = self.get_all_tasks(storage_id)
                    if len(failed_tasks) == 0 and len(tasks) == 0:
                        self.stop_executor(name, local_executor, storage_id)

    def assign_failed_job(self, context, failed_task_id, executor):
        if not self.enable_sub_process:
            instance = FailedJobHandler.get_instance(context, failed_task_id)
            instance.schedule_failed_job(failed_task_id)
        else:
            if not self.watch_job_id:
                self.init_watchers(executor)

            local_executor = self.get_local_executor(context, None,
                                                     failed_task_id, executor)
            self.rpcapi.assign_failed_job_local(context, failed_task_id,
                                                local_executor)

    def remove_failed_job(self, context, failed_task_id, executor):
        if not self.enable_sub_process:
            instance = FailedJobHandler.get_instance(context, failed_task_id)
            instance.remove_failed_job(failed_task_id)
        else:
            job = db.failed_task_get(context, failed_task_id)
            storage_id = job['storage_id']
            for name in self.executor_map.keys():
                if storage_id in self.executor_map[name]["storages"]:
                    local_executor = "{0}:{1}".format(executor, name)
                    self.rpcapi.remove_failed_job_local(
                        context, failed_task_id, local_executor)
                    tasks, failed_tasks = self.get_all_tasks(storage_id)
                    if len(failed_tasks) == 0 and len(tasks) == 0:
                        self.stop_executor(name, local_executor, storage_id)

    def schedule_boot_jobs(self, executor):
        """Schedule periodic collection if any task is currently assigned to
        this executor """
        try:
            filters = {'executor': executor, 'deleted': False}
            context = ctxt.get_admin_context()
            tasks = db.task_get_all(context, filters=filters)
            failed_tasks = db.failed_task_get_all(context, filters=filters)
            LOG.info("Scheduling boot time jobs for this executor: total "
                     "jobs to be handled :%s" % len(tasks))
            for task in tasks:
                self.assign_job(context, task['id'], executor)
                LOG.debug('Periodic collection job assigned for id: '
                          '%s ' % task['id'])
            for failed_task in failed_tasks:
                self.assign_failed_job(context, failed_task['id'], executor)
                LOG.debug('Failed job assigned for id: '
                          '%s ' % failed_task['id'])

        except Exception as e:
            LOG.error(
                "Failed to schedule boot jobs for this executor "
                "reason: %s.", six.text_type(e))
        else:
            LOG.debug("Boot job scheduling completed.")

    def init_watchers(self, group):
        watcher = GroupMembership(agent_id=group)
        watcher.start()
        watcher.create_group(group)
        LOG.info('Created child process membership group {0}.'
                 'Initial members of group: {1}'.format(
                     group, watcher.get_members(group)))

        watcher.register_watcher_func(group, self.on_process_join,
                                      self.on_process_leave)
        self.group = group
        self.watcher = watcher
        self.watch_job_id = uuidutils.generate_uuid()
        self.scheduler.add_job(
            watcher.watch_group_change,
            'interval',
            seconds=CONF.telemetry.group_change_detect_interval,
            next_run_time=datetime.datetime.now(),
            id=self.watch_job_id)
        LOG.info(
            'Created watch for group membership change for group {0}.'.format(
                group))
        self.cleanup_job_id = uuidutils.generate_uuid()
        self.scheduler.add_job(self.process_cleanup,
                               'interval',
                               seconds=CONF.telemetry.process_cleanup_interval,
                               next_run_time=datetime.datetime.now(),
                               id=self.cleanup_job_id)
        LOG.info(
            'Created process cleanup background job for group {0}.'.format(
                group))

    def on_process_join(self, event):
        LOG.info('Member %s joined the group %s' %
                 (event.member_id, event.group_id))
        host = event.group_id.decode('utf-8')
        if self.watcher:
            LOG.info('Processes in current node {0}'.format(
                self.watcher.get_members(host)))

    def on_process_leave(self, event):
        LOG.info('Member %s left the group %s' %
                 (event.member_id, event.group_id))
        executor_topic = event.member_id.decode('utf-8')
        name = executor_topic.split(':')[1]
        if name in self.executor_map.keys():
            host = event.group_id.decode('utf-8')
            LOG.info(
                "Re-create process {0} in {1} that is handling tasks".format(
                    executor_topic, host))
            launcher = self.create_process(executor_topic, host)
            self.executor_map[name]["launcher"] = launcher
            context = ctxt.get_admin_context()
            for storage_id in self.executor_map[name]["storages"]:
                tasks, failed_tasks = self.get_all_tasks(storage_id)
                for task in tasks:
                    LOG.info("Re-scheduling task {0} of storage {1}".format(
                        task['id'], storage_id))
                    self.rpcapi.assign_job_local(context, task['id'],
                                                 executor_topic)

                for f_task in failed_tasks:
                    LOG.info("Re-scheduling failed failed task {0},"
                             " of storage {1}".format(f_task['id'],
                                                      storage_id))
                    self.rpcapi.assign_failed_job_local(
                        context, f_task['id'], executor_topic)

    def process_cleanup(self):
        LOG.info('Periodic process cleanup called')
        executor_names = self.executor_map.keys()

        # Collect all names to delete
        names_to_delete = []
        for name in executor_names:
            if len(self.executor_map[name]["storages"]) == 0:
                delay = self.executor_map[name]["cleanup_delay"]
                if delay < 0:
                    LOG.info(
                        "Cleanup delay for local executor {0} expired".format(
                            name))
                    names_to_delete.append(name)
                else:
                    LOG.info(
                        "Delay cleanup for local executor {0} for {1}".format(
                            name, delay))
                    delay = delay - CONF.telemetry.process_cleanup_interval
                    self.executor_map[name]["cleanup_delay"] = delay
        # Delete names
        for name in names_to_delete:
            self.executor_map[name]["launcher"].stop()
            self.executor_map.pop(name)

    def create_process(self, topic=None, host=None):
        metrics_task_server = service. \
            MetricsService.create(binary='delfin-task',
                                  topic=topic,
                                  host=host,
                                  manager='delfin.'
                                          'task_manager.'
                                          'subprocess_manager.'
                                          'SubprocessManager',
                                  coordination=False)
        launcher = oslo_ser.ProcessLauncher(CONF)
        launcher.launch_service(metrics_task_server, workers=1)
        return launcher

    def get_local_executor(self, context, task_id, failed_task_id, executor):
        executor_names = self.executor_map.keys()
        storage_id = None
        if task_id:
            job = db.task_get(context, task_id)
            storage_id = job['storage_id']
        elif failed_task_id:
            job = db.failed_task_get(context, failed_task_id)
            storage_id = job['storage_id']
        else:
            raise exception.InvalidInput("Missing task id")

        # Storage already exists
        for name in executor_names:
            executor_topic = "{0}:{1}".format(executor, name)
            if storage_id in self.executor_map[name]["storages"]:
                return executor_topic

        # Return existing executor_topic
        for name in executor_names:
            no_of_storages = len(self.executor_map[name]["storages"])
            if no_of_storages and (no_of_storages <
                                   CONF.telemetry.max_storages_in_child):
                executor_topic = "{0}:{1}".format(executor, name)
                LOG.info(
                    "Selecting existing local executor {0} for {1}".format(
                        executor_topic, storage_id))
                self.executor_map[name]["storages"].append(storage_id)
                return executor_topic

        # Return executor_topic after creating one
        for index in range(CONF.telemetry.max_childs_in_node):
            name = "executor_{0}".format(index + 1)
            if name not in executor_names:
                executor_topic = "{0}:{1}".format(executor, name)
                LOG.info("Create a new local executor {0} for {1}".format(
                    executor_topic, storage_id))
                launcher = self.create_process(topic=executor_topic,
                                               host=executor)
                self.executor_map[name] = {
                    "storages": [storage_id],
                    "launcher": launcher,
                    "cleanup_delay": 0
                }
                return executor_topic

        msg = "Reached maximum number of ({0}) local executors". \
            format(CONF.telemetry.max_childs_in_node)
        LOG.error(msg)
        raise RuntimeError(msg)

    def get_all_tasks(self, storage_id):
        filters = {'storage_id': storage_id, 'deleted': False}
        context = ctxt.get_admin_context()
        tasks = db.task_get_all(context, filters=filters)
        failed_tasks = db.failed_task_get_all(context, filters=filters)
        return tasks, failed_tasks

    def stop_executor(self, name, local_executor, storage_id):
        LOG.info("Stop and remove local executor {0}".format(local_executor))
        if storage_id in self.executor_map[name]["storages"]:
            self.executor_map[name]["storages"].remove(storage_id)
        self.executor_map[name]["cleanup_delay"] = \
            CONF.telemetry.task_cleanup_delay

    def stop(self):
        """Cleanup periodic jobs"""
        if self.watch_job_id:
            self.scheduler.remove_job(self.watch_job_id)
        if self.cleanup_job_id:
            self.scheduler.remove_job(self.cleanup_job_id)
        if self.group and self.watcher:
            self.watcher.delete_group(self.group)
        if self.watcher:
            self.watcher.stop()
        if self.scheduler:
            self.scheduler.shutdown()
        self.watch_job_id = None
        self.cleanup_job_id = None
        self.group = None
        self.watcher = None
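The child-process topics follow a '<executor>:<name>' convention (see get_local_executor), and executor_map tracks per-child state; a short sketch of that bookkeeping with illustrative values:

# Sketch of the executor_map bookkeeping (values illustrative).
executor_topic = "{0}:{1}".format('delfin-task-host1', 'executor_1')
executor_map = {'executor_1': {"storages": ['storage-abc'],   # storages served
                               "launcher": None,              # ProcessLauncher in real code
                               "cleanup_delay": 0}}           # countdown before teardown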
Example #50
0
class Scheduler(object):
    def __init__(self):
        self._scheduler = BackgroundScheduler()
        self._scheduler.start()

    def runCron(self,
                function,
                minute=None,
                hour=None,
                day=None,
                month=None,
                day_week=None,
                year=None,
                job_id=None,
                args=[],
                kwargs={}):
        if job_id is None:
            job_id = str(function)
        logging.info('adding cron job: {}'.format(str(job_id)))
        self._scheduler.add_job(function,
                                'cron',
                                args=args,
                                kwargs=kwargs,
                                minute=minute,
                                hour=hour,
                                day=day,
                                month=month,
                                day_of_week=day_week,
                                year=year,
                                id=job_id,
                                replace_existing=True)

    #TODO: Build cron class
    def runSimpleWeekCron(self,
                          function,
                          minute=None,
                          hour=None,
                          days_of_week=None,
                          job_id=None,
                          args=[],
                          kwargs={}):
        '''days_of_week takes a list of days'''
        if job_id is None:
            job_id = str(function)
        if days_of_week is not None and days_of_week != '*':
            run_days = []
            for d in DAYS_OF_WEEK:
                if d in days_of_week:
                    run_days.append(d)
            days_of_week = ','.join(run_days)
        logging.info('adding simple weekly cron job: {}'.format(str(job_id)))
        self._scheduler.add_job(function,
                                'cron',
                                args=args,
                                kwargs=kwargs,
                                minute=minute,
                                hour=hour,
                                day='*',
                                month='*',
                                day_of_week=days_of_week,
                                year='*',
                                id=job_id,
                                replace_existing=True)

    def runEveryS(self,
                  delay,
                  function,
                  args=[],
                  kwargs={},
                  job_id=None,
                  replace=True):
        if job_id is None:
            job_id = str(function)
        logging.info('runEveryS job: {}'.format(str(job_id)))
        self._scheduler.add_job(function,
                                'interval',
                                seconds=delay,
                                args=args,
                                kwargs=kwargs,
                                id=job_id,
                                replace_existing=replace)

    def runEveryM(self,
                  delay,
                  function,
                  args=[],
                  kwargs={},
                  job_id=None,
                  replace=True):
        if job_id is None:
            job_id = str(function)
        logging.info('runEveryM job: {}'.format(str(job_id)))
        self._scheduler.add_job(function,
                                'interval',
                                minutes=delay,
                                args=args,
                                kwargs=kwargs,
                                id=job_id,
                                replace_existing=replace)

    def runEveryH(self,
                  delay,
                  function,
                  args=[],
                  kwargs={},
                  job_id=None,
                  replace=True):
        if job_id is None:
            job_id = str(function)
        logging.info('runEveryH job: {}'.format(str(job_id)))
        self._scheduler.add_job(function,
                                'interval',
                                hours=delay,
                                args=args,
                                kwargs=kwargs,
                                id=job_id,
                                replace_existing=replace)

    def runInS(self,
               delay,
               function,
               args=[],
               kwargs={},
               job_id=None,
               replace=True):
        if job_id is None:
            job_id = str(function)
        logging.info('runInS job: {}'.format(str(job_id)))
        run_time = datetime.now() + timedelta(seconds=delay)
        self._scheduler.add_job(function,
                                'date',
                                run_date=run_time,
                                args=args,
                                kwargs=kwargs,
                                id=job_id,
                                replace_existing=replace)

    def runInM(self,
               delay,
               function,
               args=[],
               kwargs={},
               job_id=None,
               replace=True):
        if job_id is None:
            job_id = str(function)
        logging.info('runInM job: {}'.format(str(job_id)))
        run_time = datetime.now() + timedelta(minutes=delay)
        self._scheduler.add_job(function,
                                'date',
                                run_date=run_time,
                                args=args,
                                kwargs=kwargs,
                                id=job_id,
                                replace_existing=replace)

    def runInH(self,
               delay,
               function,
               args=[],
               kwargs={},
               job_id=None,
               replace=True):
        if job_id is None:
            job_id = str(function)
        logging.info('runInH job: {}'.format(str(job_id)))
        run_time = datetime.now() + timedelta(hours=delay)
        self._scheduler.add_job(function,
                                'date',
                                run_date=run_time,
                                args=args,
                                kwargs=kwargs,
                                id=job_id,
                                replace_existing=replace)

    def runAt(self,
              date,
              function,
              args=[],
              kwargs={},
              job_id=None,
              replace=True):
        if job_id is None:
            job_id = str(function)
        logging.info('runAt job: {} date: {}'.format(str(job_id), str(date)))
        self._scheduler.add_job(function,
                                'date',
                                run_date=date,
                                args=args,
                                kwargs=kwargs,
                                id=job_id,
                                replace_existing=replace)

    def cancel(self, job_id):
        try:
            logging.info('canceling job: {}'.format(str(job_id)))
            self._scheduler.remove_job(job_id)
            return True
        except Exception:
            return False
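
A minimal usage sketch for the wrapper above; the heartbeat function, its argument and the job ids are illustrative assumptions, not part of the original example:

def heartbeat(tag):
    print('heartbeat from', tag)

s = Scheduler()
s.runEveryS(10, heartbeat, args=['worker-1'], job_id='heartbeat')  # every 10 seconds
s.runCron(heartbeat, minute='0', hour='6', args=['daily'], job_id='daily-6am')  # 06:00 every day
s.runInM(5, heartbeat, args=['one-shot'], job_id='one-shot')  # once, 5 minutes from now
s.cancel('heartbeat')  # returns False if the job is already gone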
示例#51
class Trigger:
    def __init__(self, app):
        self.scheduler = None
        self.app = app

    def setup(self):
        self.scheduler = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'type': 'sqlalchemy',
                'url': self.app.config["TRIGGER_DATABASE_URL"]
            },
            # the key must be 'default' for jobs added without an explicit executor
            'apscheduler.executors.default': {
                'type': 'threadpool',
                'max_workers': '30'
            },
            'apscheduler.job_defaults.coalesce': 'false',
            'apscheduler.job_defaults.max_instances': '4',
            'apscheduler.timezone': 'UTC',
        })

    def start(self):
        self.scheduler.start()

    def is_running(self):
        return self.scheduler.running  # `running` is a property, not a method

    def shutdown(self):
        self.scheduler.shutdown()

    def load_job_list(self):
        with self.app.app_context():
            projects = AutoProject.query.all()
            # cron fields: minute hour day month day_of_week

            for p in projects:
                if p.enable and self.scheduler.get_job(str(p.id)) is None:
                    cron = p.cron.replace("\n", "").strip().split(" ")
                    if len(cron) < 5:
                        continue
                    j = self.scheduler.add_job(func=run_job,
                                               trigger='cron',
                                               name=p.name,
                                               replace_existing=True,
                                               minute=cron[0],
                                               hour=cron[1],
                                               day=cron[2],
                                               month=cron[3],
                                               day_of_week=cron[4],
                                               id="%s" % p.id,
                                               args=(p.id, ))
                else:
                    self.update_job(p.id)

    def add_job(self, func, name, id, cron):
        id = str(id)  # scheduler job ids are stored as strings
        if self.scheduler.get_job(id) is None:
            self.scheduler.add_job(func=func,
                                   trigger='cron',
                                   name=name,
                                   minute=cron[0],
                                   hour=cron[1],
                                   day=cron[2],
                                   month=cron[3],
                                   day_of_week=cron[4],
                                   id=id)

    def update_job(self, id):
        job_id = "%s" % id  # scheduler job ids are stored as strings
        with self.app.app_context():
            p = AutoProject.query.filter_by(id=id).first()
            if p.enable:
                cron = p.cron.replace("\n", "").strip().split(" ")
                if len(cron) < 5:
                    return False

                if self.scheduler.get_job(job_id) is None:
                    self.scheduler.add_job(func=run_job,
                                           trigger='cron',
                                           name=p.name,
                                           minute=cron[0],
                                           hour=cron[1],
                                           day=cron[2],
                                           month=cron[3],
                                           day_of_week=cron[4],
                                           id="%s" % id,
                                           args=(id, ))
                else:
                    self.remove_job(job_id)

                    self.scheduler.add_job(func=run_job,
                                           trigger='cron',
                                           name=p.name,
                                           minute=cron[0],
                                           hour=cron[1],
                                           day=cron[2],
                                           month=cron[3],
                                           day_of_week=cron[4],
                                           id="%s" % id,
                                           args=(id, ))

            return True

    def remove_job(self, id):
        id = str(id)
        if self.scheduler.get_job(id) is not None:
            self.scheduler.remove_job(id)

    def pause_job(self, id):
        if self.scheduler.get_job(str(id)) is not None:
            self.scheduler.pause_job(str(id))

    def resume_job(self, id):
        if self.scheduler.get_job(str(id)) is not None:
            self.scheduler.resume_job(str(id))

    def get_jobs(self):
        to_zone = tz.gettz("CST")
        #jobs = self.scheduler.get_jobs()
        urls = {
            "pass": "******",
            "fail": "fail.png",
            "running": "run.gif",
            "none": "project.png"
        }
        projects = AutoProject.query.order_by(AutoProject.id.desc()).all()
        data = {"total": len(projects), "rows": []}

        for p in projects:
            next_run_time = "调度未启动"
            status = "pass"
            job = self.scheduler.get_job(str(p.id))  # job ids are stored as strings
            if job is not None:
                next_run_time = job.next_run_time.astimezone(to_zone).strftime(
                    "%Y-%m-%d %H:%M:%S")

                # Fetch the status of this job's most recent run
                task = AutoTask.query.filter_by(project_id=job.id).order_by(
                    AutoTask.build_no.desc()).first()
                if task is not None:
                    output_dir = os.getcwd() + "/logs/%s/%s" % (
                        task.project_id, task.build_no)
                    if os.path.exists(output_dir + "/report.html"):
                        tree = ET.parse(output_dir + "/output.xml")
                        root = tree.getroot()
                        # passed = root.find("./statistics/suite/stat").attrib["pass"]
                        fail = root.find(
                            "./statistics/suite/stat").attrib["fail"]
                        if int(fail) != 0:
                            status = 'fail'
                        else:
                            status = 'pass'
            else:
                status = "none"

            data["rows"].append({
                "id":
                "%s" % p.id,
                "name":
                p.name,
                "enable":
                p.enable,
                "status":
                status,
                "url":
                url_for('static', filename='images/%s' % urls[status]),
                "cron":
                p.cron,
                "next_run_time":
                next_run_time
            })
        """
        for job in jobs:
            status = "running"
            task = AutoTask.query.filter_by(project_id=job.id).order_by(AutoTask.build_no.desc()).first()
            if task is None:
                continue

            output_dir = os.getcwd() + "/logs/%s/%s" % (task.project_id, task.build_no)
            if os.path.exists(output_dir + "/report.html"):
                tree = ET.parse(output_dir + "/output.xml")
                root = tree.getroot()
                #passed = root.find("./statistics/suite/stat").attrib["pass"]
                fail = root.find("./statistics/suite/stat").attrib["fail"]
                if int(fail) != 0:
                    status = 'fail'
                else:
                    status = 'pass'

            data["rows"].append({"id": "%s" % job.id,
                                 "name": job.name,
                                 "status": status,
                                 "url": url_for('static', filename='images/%s' % urls[status]),
                                 "cron": AutoProject.query.filter_by(id=job.id).first().cron,
                                 "next_run_time": job.next_run_time.astimezone(to_zone).strftime("%Y-%m-%d %H:%M:%S")
                                 })
        """

        return data

    def print_jobs(self):
        self.scheduler.print_jobs()
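
A wiring sketch for the Trigger class above, assuming a Flask app whose config carries the TRIGGER_DATABASE_URL key that setup() reads; the SQLite URL is an illustrative assumption:

from flask import Flask

app = Flask(__name__)
app.config['TRIGGER_DATABASE_URL'] = 'sqlite:///trigger-jobs.sqlite'

trigger = Trigger(app)
trigger.setup()          # build the scheduler with the SQLAlchemy job store
trigger.start()
trigger.load_job_list()  # schedule every enabled AutoProject by its cron field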
示例#52
class PeriodicManager(LoggingMixin):
    """
    Supports both cron and interval periodic configs.
    cron: second minute hour day month day_of_week [year]
    interval: weeks,days,hours,minutes,seconds
    """
    def __init__(self, mailbox: Mailbox):
        super().__init__()
        self.mailbox = mailbox
        self.sc = BackgroundScheduler()

    def start(self):
        self.sc.start()

    def shutdown(self):
        self.sc.shutdown()

    def _generate_job_id(self, run_id, task_id):
        return '{}:{}'.format(run_id, task_id)

    def add_task(self, run_id, task_id, periodic_config: Dict):
        if 'cron' in periodic_config:

            def build_cron_trigger(expr) -> CronTrigger:
                cron_items = expr.split()
                if len(cron_items) == 7:
                    return CronTrigger(second=cron_items[0],
                                       minute=cron_items[1],
                                       hour=cron_items[2],
                                       day=cron_items[3],
                                       month=cron_items[4],
                                       day_of_week=cron_items[5],
                                       year=cron_items[6])
                elif len(cron_items) == 6:
                    return CronTrigger(second=cron_items[0],
                                       minute=cron_items[1],
                                       hour=cron_items[2],
                                       day=cron_items[3],
                                       month=cron_items[4],
                                       day_of_week=cron_items[5])
                else:
                    raise ValueError(
                        'The cron expression {} is in an incorrect format; follow the pattern: '
                        'second minute hour day month day_of_week [year].'
                        .format(expr))

            self.sc.add_job(id=self._generate_job_id(run_id, task_id),
                            func=trigger_periodic_task,
                            args=(self.mailbox, run_id, task_id),
                            trigger=build_cron_trigger(
                                periodic_config['cron']))
        elif 'interval' in periodic_config:
            interval_expr: str = periodic_config['interval']
            interval_items = interval_expr.split(',')
            if len(interval_items) != 5:
                raise ValueError(
                    'The interval expression {} is in an incorrect format; follow the pattern: '
                    'weeks,days,hours,minutes,seconds.'.format(interval_expr))
            temp_list = []
            is_zero = True
            for item in interval_items:
                if item is None or '' == item.strip():
                    v = 0
                else:
                    v = int(item.strip())
                if v < 0:
                    raise ValueError(
                        'Each item of the interval expression must be greater than or equal to 0.'
                    )
                if v > 0:
                    is_zero = False
                temp_list.append(v)
            if is_zero:
                raise ValueError('At least one interval field must be greater than 0.')

            self.sc.add_job(id=self._generate_job_id(run_id, task_id),
                            func=trigger_periodic_task,
                            args=(self.mailbox, run_id, task_id),
                            trigger=IntervalTrigger(seconds=temp_list[4],
                                                    minutes=temp_list[3],
                                                    hours=temp_list[2],
                                                    days=temp_list[1],
                                                    weeks=temp_list[0]))
        else:
            self.log.error(
                'Periodic config must be of type cron or interval; current periodic config: {}'
                .format(periodic_config))

    def remove_task(self, run_id, task_id):
        self.sc.remove_job(job_id=self._generate_job_id(run_id, task_id))
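
A usage sketch for PeriodicManager, following the two config formats documented in its docstring; the mailbox, run id and task ids are illustrative assumptions:

manager = PeriodicManager(mailbox)  # `mailbox` is supplied by the surrounding app
manager.start()

# cron config: second minute hour day month day_of_week [year]
manager.add_task('run-1', 'task-a', {'cron': '0 30 2 * * *'})    # 02:30:00 every day

# interval config: weeks,days,hours,minutes,seconds
manager.add_task('run-1', 'task-b', {'interval': '0,0,1,30,0'})  # every 1.5 hours

manager.remove_task('run-1', 'task-a')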
示例#53
def main():
    TIMEZONE = os.getenv('TIMEZONE', 'Asia/Shanghai')

    client = docker.DockerClient(base_url='unix://var/run/docker.sock',
                                 timeout=100)
    events = client.events(decode=True)

    scheduler = BackgroundScheduler(
        executors={'default': ThreadPoolExecutor(40)}, timezone=TIMEZONE)

    logging.basicConfig(stream=sys.stdout)
    logging.getLogger('apscheduler').setLevel(logging.INFO)

    try:
        scheduler.start()
    except Exception:
        # e.g. the scheduler is already running
        pass

    # Watch services being started, updated and stopped, and create/adjust scheduled jobs in real time
    while True:

        # Repeatedly poll each running container's /etc/cron.d and adjust the job list
        try:
            containers = client.containers.list()
        except Exception:
            continue

        scheduled_jobs = scheduler.get_jobs()
        hashed_scheduled_jobs = {}
        for job in scheduled_jobs:
            # If the container a stored job belongs to no longer exists,
            # drop the job from the store
            job_container = job.args[0]
            job_hash = job.args[2]
            if job_container not in containers:
                scheduler.remove_job(job_id=job.id)
            else:
                hashed_scheduled_jobs[job_hash] = job

        for container in containers:
            try:
                cmd = "sh -c '[ -d /etc/cron.d ] && find /etc/cron.d ! -name \".*\" -type f -exec cat \{\} \;'"
                exit_code, output = container.exec_run(cmd=cmd,
                                                       stderr=False,
                                                       tty=True)
                tab = output.decode().replace('\t', ' ')
            except Exception:
                continue

            if tab == '':
                continue

            cron_jobs = CronTab(tab=tab, user=False)
            job_hashes = []
            for job in cron_jobs:
                if not job.is_enabled():
                    continue
                job_hash = hashsum(container.name + ': ' + str(job))
                if job_hash not in hashed_scheduled_jobs:
                    slices = str(job.slices)
                    if slices.startswith('@'):
                        slices = SPECIALS[slices.lstrip('@')]
                    scheduler.add_job(lambda c, cmd, _: c.exec_run(
                        cmd=cmd, stderr=False, tty=True),
                                      CronTrigger.from_crontab(slices),
                                      args=[container, job.command, job_hash],
                                      name=job.command)
                else:
                    del hashed_scheduled_jobs[job_hash]

        for job_hash, job in hashed_scheduled_jobs.items():
            # Jobs left over here matched no container cron entry; drop them
            scheduler.remove_job(job_id=job.id)

        time.sleep(10)
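
main() relies on two helpers that the listing does not define; a plausible sketch, assuming hashsum is a stable content digest and SPECIALS maps crontab @-shortcuts to their standard five-field equivalents:

import hashlib

SPECIALS = {
    'hourly':   '0 * * * *',
    'daily':    '0 0 * * *',
    'midnight': '0 0 * * *',
    'weekly':   '0 0 * * 0',
    'monthly':  '0 0 1 * *',
    'yearly':   '0 0 1 1 *',
    'annually': '0 0 1 1 *',
}

def hashsum(text):
    """Stable digest used to recognize container cron entries that are already scheduled."""
    return hashlib.sha256(text.encode()).hexdigest()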