class UpdateSuccessRate:
    """Periodic background job that recomputes the success rate once per day.

    Wraps an APScheduler ``BackgroundScheduler``; keep a reference to the
    instance so :meth:`terminate` can be called for a clean shutdown.
    (Author: Marten Bolin, 2017-11-22.)
    """

    def __init__(self, daemon=False):
        """Create the scheduler, register the daily job, and fire it once now.

        :param daemon: run the scheduler thread as a daemon (Optional).
                       Default is False.
        """
        scheduler = BackgroundScheduler()
        if not daemon:
            # TODO Should use a good method that doesnt change a protected member
            # TODO of a class directly
            scheduler._daemon = False
        scheduler.add_job(self._run, 'interval', days=1, id="3")
        scheduler.start()
        # Trigger an immediate first run instead of waiting a full day.
        scheduler.modify_job(job_id="3", next_run_time=datetime.now())
        self.scheduled = scheduler

    @staticmethod
    def _run():
        """Insert a fresh success-rate row into the database."""
        print("Updating success rate")
        InsertSuccessRate().insert_success_rate()

    def terminate(self):
        """Stop the scheduler thread cleanly."""
        print("Shutting down update_success_rate..")
        self.scheduled.shutdown()
        print("Update_success_rate has been shut down.")
# Beispiel #2
# 0
class Scheduler(object):
    """Thin facade over a ``BackgroundScheduler`` with a Redis job store."""

    def __init__(self):
        self._scheduler = BackgroundScheduler(executors=executors,
                                              job_defaults=job_defaults)
        self._scheduler.add_jobstore('redis',
                                     jobs_key='crontpy.jobs',
                                     run_times_key='crontpy.run_times')

    @property
    def running(self):
        """Whether the underlying scheduler has been started."""
        return self._scheduler.running

    def start(self):
        """Begin processing scheduled jobs."""
        self._scheduler.start()

    def shutdown(self, wait=True):
        """Shut down, optionally waiting for running jobs to finish."""
        self._scheduler.shutdown(wait)

    def pause(self):
        """Pause processing of all jobs."""
        self._scheduler.pause()

    def resume(self):
        """Resume processing of all jobs."""
        self._scheduler.resume()

    def get_jobs(self):
        """Return every scheduled job."""
        return self._scheduler.get_jobs()

    def get_job(self, jid):
        """Return the job with id *jid*, or None when unknown."""
        return self._scheduler.get_job(job_id=jid)

    def run_job(self, jid):
        """Execute the job synchronously, bypassing its schedule."""
        job = self.get_job(jid)
        if job is None:
            raise Exception('job id:{0} not found'.format(jid))
        job.func(*job.args, **job.kwargs)

    def resume_job(self, jid):
        """Resume the paused job *jid*."""
        self._scheduler.resume_job(job_id=jid)

    def pause_job(self, jid):
        """Pause the job *jid*."""
        self._scheduler.pause_job(job_id=jid)

    def modify_job(self, jid, **changes):
        """Apply keyword *changes* to job *jid* and return it."""
        return self._scheduler.modify_job(job_id=jid, **changes)

    def delete_job(self, jid):
        """Remove job *jid* from the scheduler."""
        self._scheduler.remove_job(job_id=jid)
# Beispiel #3
# 0
class Trigger:
    """Manage per-project cron jobs for AutoProject entries.

    Jobs are persisted in a SQLAlchemy job store and executed by a
    process-pool executor; each job id mirrors an AutoProject primary key.
    """

    def __init__(self, app):
        # The scheduler is created lazily in setup() so the Flask app
        # configuration is available first.
        self.scheduler = None
        self.app = app

    def setup(self):
        """Create the scheduler from the Flask application configuration."""
        self.scheduler = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'type': 'sqlalchemy',
                'url': self.app.config[
                    "TRIGGER_DATABASE_URL"]  #os.environ.get('TRIGGER_DATABASE_URL')
            },
            'apscheduler.executors.processpool': {
                'type': 'processpool',
                'max_workers': '30'
            },
            'apscheduler.job_defaults.coalesce':
            'false',
            'apscheduler.job_defaults.max_instances':
            '20',
            'apscheduler.timezone':
            'UTC',
        })

    def start(self):
        """Start the background scheduler."""
        self.scheduler.start()

    def is_running(self):
        """Return True when the scheduler has been started."""
        # BUG FIX: BackgroundScheduler.running is a property, not a method;
        # the original ``self.scheduler.running()`` raised TypeError.
        return self.scheduler.running

    def shutdown(self):
        """Stop the scheduler."""
        self.scheduler.shutdown()

    def load_job_list(self):
        """Register a cron job for every AutoProject that lacks one."""
        with self.app.app_context():
            projects = AutoProject.query.all()
            # cron field format: "minute hour day month day_of_week"
            for p in projects:
                if self.scheduler.get_job(p.id) is None:
                    cron = p.cron.replace("\n", "").strip().split(" ")
                    if len(cron) < 5:
                        # Malformed cron expression -- skip this project.
                        continue
                    j = self.scheduler.add_job(func=run_job,
                                               trigger='cron',
                                               name=p.name,
                                               replace_existing=True,
                                               minute=cron[0],
                                               hour=cron[1],
                                               day=cron[2],
                                               month=cron[3],
                                               day_of_week=cron[4],
                                               id="%s" % p.id,
                                               args=(p.id, ))

    def update_job(self, id):
        """Create or reschedule the job for project *id*.

        :return: False when the stored cron expression is malformed,
                 True otherwise.
        """
        with self.app.app_context():
            p = AutoProject.query.filter_by(id=id).first()
            cron = p.cron.replace("\n", "").strip().split(" ")
            if len(cron) < 5:
                return False

            if self.scheduler.get_job(id) is None:
                # BUG FIX: keyword was misspelled 'eplace_existing'.
                self.scheduler.add_job(func=run_job,
                                       trigger='cron',
                                       name=p.name,
                                       replace_existing=True,
                                       minute=cron[0],
                                       hour=cron[1],
                                       day=cron[2],
                                       month=cron[3],
                                       day_of_week=cron[4],
                                       id="%s" % id,
                                       args=(id, ))
            else:
                # BUG FIX: modify_job() cannot change trigger fields such as
                # minute/hour (and the misspelled 'eplace_existing' keyword
                # would be rejected); trigger changes go through
                # reschedule_job(), the remaining attributes through
                # modify_job().
                self.scheduler.reschedule_job(job_id="%s" % id,
                                              trigger='cron',
                                              minute=cron[0],
                                              hour=cron[1],
                                              day=cron[2],
                                              month=cron[3],
                                              day_of_week=cron[4])
                self.scheduler.modify_job(job_id="%s" % id,
                                          name=p.name,
                                          args=(id, ))

            return True

    def remove_job(self, id):
        """Drop the scheduled job for project *id*, if any."""
        if self.scheduler.get_job(id) is not None:
            self.scheduler.remove_job(id)

    def pause_job(self, id):
        pass

    def resume_job(self, id):
        pass

    def get_jobs(self):
        """Summarize all jobs plus the latest build status of each project."""
        to_zone = tz.gettz("CST")
        jobs = self.scheduler.get_jobs()
        data = {"total": len(jobs), "rows": []}
        urls = {
            "pass": "******",
            "fail": "fail.png",
            "running": "run.gif"
        }
        for job in jobs:
            status = "running"
            task = AutoTask.query.filter_by(project_id=job.id).order_by(
                AutoTask.build_no.desc()).first()
            # Robustness fix: a project may never have been built, in which
            # case there is no task row and no report to inspect.
            if task is not None:
                output_dir = os.getcwd() + "/logs/%s/%s" % (task.project_id,
                                                            task.build_no)
                if os.path.exists(output_dir + "/report.html"):
                    tree = ET.parse(output_dir + "/output.xml")
                    root = tree.getroot()
                    fail = root.find("./statistics/suite/stat").attrib["fail"]
                    status = 'fail' if int(fail) != 0 else 'pass'

            data["rows"].append({
                "id":
                job.id,
                "name":
                job.name,
                "status":
                status,
                "url":
                url_for('static', filename='images/%s' % urls[status]),
                "cron":
                AutoProject.query.filter_by(id=job.id).first().cron,
                "next_run_time":
                job.next_run_time.astimezone(to_zone).strftime(
                    "%Y-%m-%d %H:%M:%S")
            })

        return data

    def print_jobs(self):
        pass
# Beispiel #4
# 0
class BileanScheduler(object):
    """Billing scheduler based on apscheduler.

    Keeps per-user billing jobs (one-shot 'notify'/'freeze' date jobs and a
    recurring 'daily' cron job) in sync with the job table in the database.
    """

    # Job categories; each value is also the prefix of the job id.
    job_types = (
        NOTIFY, DAILY, FREEZE,
    ) = (
        'notify', 'daily', 'freeze',
    )
    trigger_types = (DATE, CRON) = ('date', 'cron')

    def __init__(self, **kwargs):
        super(BileanScheduler, self).__init__()
        self._scheduler = BackgroundScheduler()
        self.notifier = notifier.Notifier()
        self.engine_id = kwargs.get('engine_id', None)
        self.context = kwargs.get('context', None)
        if not self.context:
            self.context = bilean_context.get_admin_context()
        # Optionally persist APScheduler jobs in an external store.
        if cfg.CONF.bilean_task.store_ap_job:
            self._scheduler.add_jobstore(cfg.CONF.bilean_task.backend,
                                         url=cfg.CONF.bilean_task.connection)

    def init_scheduler(self):
        """Init all jobs related to the engine from db."""
        jobs = db_api.job_get_all(self.context, engine_id=self.engine_id)
        if not jobs:
            LOG.info(_LI("No job found from db"))
            return True
        for job in jobs:
            # BUG FIX: the original referenced self.bilean_scheduler and
            # self.bilean_task, neither of which exists on this class.
            if self.is_exist(job.id):
                continue
            task = getattr(self, "_%s_task" % job.job_type)
            # Daily jobs use a cron trigger; notify/freeze are one-shot dates.
            trigger_type = self.CRON if job.job_type == self.DAILY else self.DATE
            # BUG FIX: stored parameters must be expanded into keyword
            # arguments (run_date, or hour/minute) for add_job to see them.
            self.add_job(task, job.id, trigger_type=trigger_type,
                         **(job.parameters or {}))

    def add_job(self, task, job_id, trigger_type='date', **kwargs):
        """Add a job to scheduler by given data.

        :param task: callable to execute when the job fires
        :param str|unicode job_id: job identifier; the user id is its suffix
        :param trigger_type: 'date' (one-shot) or 'cron' (recurring)
        :keyword run_date: required for 'date' jobs -- when to first run
        :keyword hour: optional hour for 'cron' jobs
        :keyword minute: optional minute for 'cron' jobs
        :raises exception.InvalidInput: when a 'date' job lacks run_date
        """
        mg_time = cfg.CONF.bilean_task.misfire_grace_time
        job_time_zone = cfg.CONF.bilean_task.time_zone
        user_id = job_id.split('-')[1]
        if trigger_type == 'date':
            run_date = kwargs.get('run_date')
            if run_date is None:
                msg = "Param run_date cannot be None for trigger type 'date'."
                raise exception.InvalidInput(reason=msg)
            self._scheduler.add_job(task, 'date',
                                    timezone=job_time_zone,
                                    run_date=run_date,
                                    args=[user_id],
                                    id=job_id,
                                    misfire_grace_time=mg_time)
            return True

        # Add a cron type job; fall back to a random time of day when no
        # hour/minute was supplied.
        hour = kwargs.get('hour', None)
        minute = kwargs.get('minute', None)
        if not hour or not minute:
            hour, minute = self._generate_timer()
        self._scheduler.add_job(task, 'cron',
                                timezone=job_time_zone,
                                hour=hour,
                                minute=minute,
                                args=[user_id],
                                id=job_id,
                                misfire_grace_time=mg_time)
        return True

    def modify_job(self, job_id, **changes):
        """Modifies the properties of a single job.

        Modifications are passed to this method as extra keyword arguments.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.modify_job(job_id, **changes)

    def remove_job(self, job_id):
        """Removes a job, preventing it from being run any more.

        :param str|unicode job_id: the identifier of the job
        """

        self._scheduler.remove_job(job_id)

    def start(self):
        """Start the billing scheduler."""
        LOG.info(_('Starting Billing scheduler'))
        self._scheduler.start()

    def stop(self):
        """Shut the billing scheduler down."""
        LOG.info(_('Stopping Billing scheduler'))
        self._scheduler.shutdown()

    def is_exist(self, job_id):
        """Returns if the Job exists that matches the given ``job_id``.

        :param str|unicode job_id: the identifier of the job
        :return: True|False
        """

        job = self._scheduler.get_job(job_id)
        return job is not None

    def _notify_task(self, user_id):
        """Warn the user the balance is nearly gone, then queue a freeze."""
        user = user_mod.User.load(self.context, user_id=user_id)
        msg = {'user': user.id, 'notification': 'The balance is almost use up'}
        self.notifier.info('billing.notify', msg)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'notify'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))
        self._add_freeze_job(user)

    def _daily_task(self, user_id):
        """Run the recurring daily billing for one user."""
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'daily'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _freeze_task(self, user_id):
        """Final billing pass when the user balance is expected exhausted."""
        user = user_mod.User.load(self.context, user_id=user_id)
        if user.status != user.FREEZE and user.rate > 0:
            user.do_bill(self.context)
        try:
            db_api.job_delete(
                self.context, self._generate_job_id(user.id, 'freeze'))
        except exception.NotFound as e:
            LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def _add_notify_job(self, user):
        """Schedule a one-shot notify job at the projected low-balance time."""
        if not user.rate:
            return False
        total_seconds = user['balance'] / user['rate']
        prior_notify_time = cfg.CONF.bilean_task.prior_notify_time * 3600
        notify_seconds = total_seconds - prior_notify_time
        notify_seconds = notify_seconds if notify_seconds > 0 else 0
        run_date = timeutils.utcnow() + timedelta(seconds=notify_seconds)
        job_id = self._generate_job_id(user['id'], self.NOTIFY)
        # BUG FIX: pass run_date straight through; wrapping it in a 'params'
        # keyword left add_job without a run_date and raised InvalidInput.
        self.add_job(self._notify_task, job_id, run_date=run_date)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.NOTIFY,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)

    def _add_freeze_job(self, user):
        """Schedule a one-shot freeze job for when the balance runs out."""
        if not user.rate:
            return False
        total_seconds = user.balance / user.rate
        run_date = timeutils.utcnow() + timedelta(seconds=total_seconds)
        job_id = self._generate_job_id(user.id, self.FREEZE)
        # BUG FIX: pass run_date straight through (see _add_notify_job).
        self.add_job(self._freeze_task, job_id, run_date=run_date)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.FREEZE,
               'engine_id': self.engine_id,
               'parameters': {'run_date': run_date}}
        db_api.job_create(self.context, job)
        return True

    def _add_daily_job(self, user):
        """Schedule the recurring daily billing job at a random time."""
        job_id = self._generate_job_id(user.id, self.DAILY)
        params = {'hour': random.randint(0, 23),
                  'minute': random.randint(0, 59)}
        # BUG FIX: expand hour/minute so the cron trigger uses them instead
        # of silently falling back to another random timer.
        self.add_job(self._daily_task, job_id, trigger_type='cron', **params)
        # Save job to database
        job = {'id': job_id,
               'job_type': self.DAILY,
               'engine_id': self.engine_id,
               'parameters': params}
        db_api.job_create(self.context, job)
        return True

    def _delete_all_job(self, user):
        """Remove every scheduled job and db record for *user*."""
        for job_type in self.job_types:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

    def update_user_job(self, user):
        """Update user's billing job"""
        if user.status not in [user.ACTIVE, user.WARNING]:
            # BUG FIX: _delete_all_job expects the user object (it reads
            # user.id itself), not the bare id.
            self._delete_all_job(user)
            return

        for job_type in self.NOTIFY, self.FREEZE:
            job_id = self._generate_job_id(user.id, job_type)
            if self.is_exist(job_id):
                self.remove_job(job_id)
            try:
                db_api.job_delete(self.context, job_id)
            except exception.NotFound as e:
                LOG.warn(_("Failed in deleting job: %s") % six.text_type(e))

        daily_job_id = self._generate_job_id(user.id, self.DAILY)
        if not self.is_exist(daily_job_id):
            self._add_daily_job(user)

        if user.status == user.ACTIVE:
            self._add_notify_job(user)
        else:
            self._add_freeze_job(user)

    def _generate_timer(self):
        """Generate a random timer include hour and minute."""
        hour = random.randint(0, 23)
        minute = random.randint(0, 59)
        return hour, minute

    def _generate_job_id(self, user_id, job_type):
        """Generate job id by given user_id and job type"""
        return "%s-%s" % (job_type, user_id)
# Beispiel #5
# 0
class CronManager:
    """Manage scheduled test-execution jobs, optionally persisted in MongoDB."""

    def __init__(self, use_mongo_db=True):
        """Create the scheduler; with *use_mongo_db* jobs survive restarts."""
        self.scheduler = BackgroundScheduler(timezone=shanghai_tz)
        self.scheduler.configure()

        if use_mongo_db:
            self.job_store = MongoDBJobStore(database='apscheduler',
                                             collection='cronTab',
                                             client=db)
            self.scheduler.add_jobstore(self.job_store)
            self.is_replace_existing = True
        else:
            self.is_replace_existing = False

    def add_cron(self, cron_instance):
        """Register a Cron instance and return its job id.

        :param cron_instance: a Cron object carrying trigger type and args
        :raises TypeError: on a non-Cron argument or invalid trigger args
        """
        if not isinstance(cron_instance, Cron):
            raise TypeError('please add correct cron!')

        if cron_instance.trigger_type == 'interval':
            seconds = cron_instance.trigger_args.get('seconds')
            if not isinstance(seconds,
                              int) and not common.can_convert_to_int(seconds):
                raise TypeError('请输入合法的时间间隔!')
            seconds = int(seconds)
            if seconds <= 0:
                raise TypeError('请输入大于0的时间间隔!')
            # Quirk: do not pass args when adding -- the job invokes the
            # bound cron_mission of the instance directly.
            self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                seconds=seconds,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id(),
                max_instances=5,
                jitter=0)
        elif cron_instance.trigger_type == 'date':
            run_date = cron_instance.trigger_args.get('run_date')
            # TODO validate the type of run_date
            self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                run_date=run_date,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_id())
        elif cron_instance.trigger_type == 'cron':
            raise TypeError('暂时不支持 trigger_type 等于 \'cron\'')

        return cron_instance.get_id()

    def start(self, paused=False):
        """Start the scheduler, optionally in paused state."""
        self.scheduler.start(paused=paused)

    def pause_cron(self, cron_id=None, pause_all=False):
        """Pause one job by id, or all jobs when *pause_all* is set."""
        if pause_all:
            self.scheduler.pause()
        elif cron_id:
            self.scheduler.pause_job(job_id=cron_id)

    def resume_cron(self, cron_id=None, resume_all=False):
        """Resume one job by id, or all jobs when *resume_all* is set."""
        if resume_all:
            self.scheduler.resume()
        elif cron_id:
            self.scheduler.resume_job(job_id=cron_id)

    def del_cron(self, cron_id=None, del_all=False):
        """Delete one job by id, or every job when *del_all* is set."""
        if del_all:
            self.scheduler.remove_all_jobs()
        elif cron_id:
            self.scheduler.remove_job(job_id=cron_id)

    def update_cron(self, cron_id, cron_info):
        """Update an existing job's trigger and Cron payload from *cron_info*.

        :param cron_id: id of the job to update
        :param cron_info: dict of camelCase fields describing the new state
        :raises TypeError: on bad argument types or any update failure
        """
        if not isinstance(cron_id, str):
            raise TypeError('cron_id must be str')

        if not isinstance(cron_info, dict):
            raise TypeError('cron_info must be dict')

        trigger_type = cron_info.get('triggerType')
        interval = cron_info.get('interval')
        run_date = cron_info.get('runDate')
        test_case_suite_id_list = cron_info.get('testCaseSuiteIdList')
        is_execute_forbiddened_case = cron_info.get('isExecuteForbiddenedCase')
        test_case_id_list = cron_info.get('testCaseIdList')
        test_domain = cron_info.get('testDomain')
        global_vars_id = cron_info.get('globalVarsId')
        alarm_mail_list = cron_info.get('alarmMailList')
        is_ding_ding_notify = cron_info.get('isDingDingNotify')
        ding_ding_access_token = cron_info.get('dingdingAccessToken')
        ding_ding_notify_strategy = cron_info.get('dingdingNotifyStrategy')
        is_enterprise_wechat_notify = cron_info.get('isEnterpriseWechatNotify')
        enterprise_wechat_access_token = cron_info.get(
            'enterpriseWechatAccessToken')
        enterprise_wechat_notify_strategy = cron_info.get(
            'enterpriseWechatNotifyStrategy')
        cron_name = cron_info.get('name')

        # Fields shared by both Cron constructions below; the trigger-specific
        # field (run_date vs seconds) is added per branch. When updating,
        # trigger_type / run_date / seconds only refresh display fields.
        common_kwargs = dict(
            test_case_suite_id_list=test_case_suite_id_list,
            is_execute_forbiddened_case=is_execute_forbiddened_case,
            test_domain=test_domain,
            global_vars_id=global_vars_id,
            alarm_mail_list=alarm_mail_list,
            is_ding_ding_notify=is_ding_ding_notify,
            ding_ding_access_token=ding_ding_access_token,
            ding_ding_notify_strategy=ding_ding_notify_strategy,
            is_enterprise_wechat_notify=is_enterprise_wechat_notify,
            enterprise_wechat_access_token=enterprise_wechat_access_token,
            enterprise_wechat_notify_strategy=enterprise_wechat_notify_strategy,
            trigger_type=trigger_type,
            test_case_id_list=test_case_id_list,
            cron_name=cron_name)

        try:
            if trigger_type == 'interval' and int(interval) > 0:
                # BUG FIX: interval may arrive as a string; IntervalTrigger
                # needs an int number of seconds.
                self.scheduler.modify_job(
                    job_id=cron_id,
                    trigger=IntervalTrigger(seconds=int(interval)))
            elif trigger_type == 'date':
                # TODO validate the type of run_date
                self.scheduler.modify_job(
                    job_id=cron_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('更新定时任务触发器失败!')
            if run_date:
                cron = Cron(run_date=run_date, **common_kwargs)
            else:
                cron = Cron(seconds=interval, **common_kwargs)
            # Quirk: when modifying a job the args must be replaced, not func.
            self.scheduler.modify_job(job_id=cron_id,
                                      coalesce=True,
                                      args=[cron])

        except Exception as e:
            # BUG FIX: narrowed from BaseException so SystemExit and
            # KeyboardInterrupt propagate instead of being swallowed.
            raise TypeError('更新定时任务失败: %s' % e)

    def shutdown(self, force_shutdown=False):
        """Shut down; *force_shutdown* skips waiting for running jobs."""
        if force_shutdown:
            self.scheduler.shutdown(wait=False)
        else:
            self.scheduler.shutdown(wait=True)

    def get_crons(self):
        """Return every scheduled job."""
        return self.scheduler.get_jobs()
# Beispiel #6
# 0
class MainRunner(object):
    """Watch a folder of JSON configs and schedule one Hasal task per file."""

    # Shared, class-level registries (kept class-level as in the original
    # design; all instances see the same worker table).
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    def __init__(self, dirpath='.'):
        '''
        local path for load config
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        # Watch the config folder so file changes are picked up live.
        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        '''
        load every JSON config directly inside the given folder
        '''
        # BUG FIX: generator.next() is Python-2 only; the builtin next()
        # works on both Python 2 and 3.
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if 'json' in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['name'] = "Jenkins Job"
                data['path'] = fp
            except ValueError as e:
                # BUG FIX: Exception.message is Python-2 only; str(e) is
                # portable across Python versions.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
        # Default polling interval in seconds, overridable per config.
        interval = 30
        if 'interval' in data:
            interval = int(data['interval'])

        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # NOTE: the job interval cannot be modified here.
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name
                                      )

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = "hasalTask"
            object_name = "HasalTask"
            try:
                runner_module = getattr(importlib.import_module(
                                        module_path), object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run, 'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval
                                   )
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        # BUG FIX: iterating a dict yields keys only; items() is required to
        # unpack (path, worker) pairs.
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        pause the job for this config without removing it
        TODO: should have timeout if stop failed
        '''
        # BUG FIX: BaseScheduler.pause() pauses the whole scheduler and takes
        # no job_id; pausing a single job requires pause_job().
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        '''Return the runner configuration (currently empty).'''
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
# Beispiel #7
# 0
class MainRunner(object):
    """Agent main runner: keeps one scheduled HasalTask per JSON config.

    Scans ``dirpath`` for ``*.json`` configs, schedules each one as an
    APScheduler interval job, and watches the folder so that created,
    modified and deleted files update the job table on the fly.
    """

    # Shared registry: config file path -> runner object.
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    def __init__(self, dirpath='.'):
        '''
        Load configs from the local path and start watching it.
        :param dirpath: local path for load config
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        # Watch the config folder so edits take effect without a restart.
        event_handler = JsonHandler(patterns=["*.json"],
                                    ignore_directories=True)
        event_handler.set_handler(oncreated=self.load,
                                  onmodified=self.load,
                                  ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        '''
        Load every JSON config in the top level of the folder.
        '''
        # next() builtin instead of the Python-2-only .next() method.
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            # endswith('.json') is stricter than the old
            # "'json' in fname[-4:]", which also matched e.g. "foojson".
            if fname.endswith('.json'):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        Given a json file, load and create a task run regularly.
        Returns the runner, or None when the file cannot be parsed or
        the task module cannot be imported.
        '''
        logger.info(fp + " was loaded!")
        # Append ('a') instead of truncating ('w+') so earlier log
        # entries survive successive loads.
        with open("agent.log", 'a') as f:
            f.write(fp + " was loaded!")
        data = {}
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['name'] = "Jenkins Job"
                data['path'] = fp
            except ValueError as e:
                # str(e): the Python-2-only .message attribute is gone.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
        interval = 30
        if 'interval' in data:
            interval = int(data['interval'])

        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(
                    self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update exisitng runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # NOTE: the job interval can't be modified this way.
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name)

        else:  # create a new runner and schedule it
            logger.info("Create new runner [%s]" % fp)
            module_path = "hasalTask"
            object_name = "HasalTask"
            try:
                runner_module = getattr(importlib.import_module(module_path),
                                        object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run,
                                   'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval)
        return runner

    def list(self):
        '''
        Log all configs loaded.
        format: [squence number] [minion name] [config_path] [status]
        '''
        # items(): iterating the dict directly yields only the keys,
        # which cannot be unpacked into (fp, worker).
        for fp, worker in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        Given a file path, stop the running instance if possible.
        Returns True on success, False when the path is unknown.
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        Bulk removal (not implemented yet).
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        Stop all running instances by shutting the scheduler down.
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        Pause the job for fp without removing its config.
        TODO: should have timeout if stop failed
        '''
        # pause_job: scheduler.pause() pauses the whole scheduler and
        # takes no job_id, so the original call raised TypeError.
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        # Not implemented: resuming by sequence number is unsupported.
        pass

    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.unload_all()

    def get_config(self):
        '''
        Return the runner configuration (currently always empty).
        '''
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        (not implemented yet).
        '''
        pass
class JobScheduler(ISingleton):
    """Singleton facade over an APScheduler BackgroundScheduler.

    Jobs persist in a SQLAlchemy job store; scheduler and job events are
    forwarded to the JobSchedulerEvent listener callbacks.
    """

    @inject
    def __init__(
        self,
        database_config: DatabaseConfig,
        database_session_manager: DatabaseSessionManager,
    ):
        self.database_session_manager: DatabaseSessionManager = database_session_manager
        self.database_config: DatabaseConfig = database_config
        # Built lazily by run_scheduler().
        self.scheduler: BackgroundScheduler = None

    def run(self):
        """Start the scheduler process."""
        self.run_scheduler()
        print("job_process started")

    def run_scheduler(self):
        """Create, configure and start the background scheduler."""
        jobstores = {
            'default':
            SQLAlchemyJobStore(url=self.database_config.connection_string,
                               tablename='ApSchedulerJobsTable',
                               engine=self.database_session_manager.engine,
                               metadata=IocManager.Base.metadata,
                               tableschema='Aps')
        }
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        job_defaults = {'coalesce': False, 'max_instances': 5}
        self.scheduler = BackgroundScheduler(daemon=True,
                                             jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults)
        JobSchedulerEvent.job_scheduler_type = JobScheduler
        self.scheduler.add_listener(JobSchedulerEvent.listener_finish,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.scheduler.add_listener(JobSchedulerEvent.listener_job_added,
                                    EVENT_JOB_ADDED)
        self.scheduler.add_listener(JobSchedulerEvent.listener_job_submitted,
                                    EVENT_JOB_SUBMITTED)
        self.scheduler.add_listener(JobSchedulerEvent.listener_job_removed,
                                    EVENT_JOB_REMOVED)
        self.scheduler.add_listener(
            JobSchedulerEvent.listener_all_jobs_removed,
            EVENT_ALL_JOBS_REMOVED)
        self.scheduler.add_listener(
            JobSchedulerEvent.listener_job_others,
            EVENT_JOB_MODIFIED | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES)
        self.scheduler.add_listener(
            JobSchedulerEvent.listener_scheduler_other_events,
            EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN
            | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED
            | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED
            | EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED)
        self.scheduler.start()

        print('To clear the alarms, delete the example.sqlite file.')
        print('Press Ctrl+{0} to exit'.format('Break' if os.name ==
                                              'nt' else 'C'))

    def add_job_with_date(self,
                          job_function,
                          run_date,
                          args=None,
                          kwargs=None) -> Job:
        """Schedule a one-shot job at ``run_date``."""
        job: Job = self.scheduler.add_job(job_function,
                                          'date',
                                          run_date=run_date,
                                          misfire_grace_time=30000,
                                          args=args,
                                          kwargs=kwargs)
        return job

    def add_job_with_cron(self,
                          job_function,
                          cron: CronTrigger,
                          args=None,
                          kwargs=None) -> Job:
        """Schedule a recurring job driven by the given cron trigger."""
        job: Job = self.scheduler.add_job(job_function,
                                          cron,
                                          misfire_grace_time=15,
                                          args=args,
                                          kwargs=kwargs)
        return job

    def remove_job(self, job_id, jobstore=None):
        """Remove a scheduled job.

        The class originally defined remove_job twice; Python keeps only
        the last definition, so both are merged here (the jobstore
        parameter stays optional for callers of the old one-arg form).
        """
        self.scheduler.remove_job(job_id, jobstore)

    def modify_job(self, job_id, jobstore=None, **changes):
        """Apply attribute changes to a stored job."""
        return self.scheduler.modify_job(job_id, jobstore, **changes)

    def reschedule_job(self,
                       job_id,
                       jobstore=None,
                       trigger=None,
                       **trigger_args):
        """Replace a job's trigger with a new one."""
        return self.scheduler.reschedule_job(job_id, jobstore, trigger,
                                             **trigger_args)

    def pause_job(self, job_id, jobstore=None):
        """Pause a job until it is resumed."""
        return self.scheduler.pause_job(job_id, jobstore)

    def resume_job(self, job_id, jobstore=None):
        """Resume a previously paused job."""
        return self.scheduler.resume_job(job_id, jobstore)

    def get_job(self, job_id):
        """Return the stored Job, or None when unknown."""
        return self.scheduler.get_job(job_id)

    def get_jobs(self, jobstore=None):
        """Return all jobs, optionally restricted to one job store."""
        return self.scheduler.get_jobs(jobstore)
Beispiel #9
0
    },
    'apscheduler.executors.default': {
        'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
    },
})


@scheduler.scheduled_job('interval', id='pull_latest_image',
    seconds=pull_interval, timezone=utc)
def pull_latest_image():
    """Periodically pull the image; a fresh pull triggers a rebalance."""
    got_new_image = interactions.pull(image_name)
    if got_new_image:
        # A new image means running containers are stale: rebalance.
        balance_containers.trigger()

# Manual trigger: moving next_run_time to "now" makes the scheduler run
# the job immediately instead of waiting for the next interval tick.
pull_latest_image.trigger = lambda: scheduler.modify_job(
    'pull_latest_image', next_run_time=datetime.now())


@scheduler.scheduled_job('interval', id='balance_containers',
    seconds=rebalance_interval, timezone=utc)
def balance_containers():
    try:
        # Clean up before interacting with the pool:
        # - checks in containers that are checked out longer than the max lifetime
        # - stops containers that aren't running if their image is out of date
        # - stops containers from the pool not running selenium
        for container in interactions.containers():
            if container.checked_out:
                if ((datetime.utcnow() - container.checked_out).total_seconds()
                        > max_checkout_time):
                    logger.info('Container %s checked out longer than %d seconds, forcing stop',
Beispiel #10
0
class SchedulerManager(object):
    """Manage shell-command cron jobs on an APScheduler BackgroundScheduler.

    Jobs are persisted to MongoDB (with an in-memory fallback store), run
    on single-slot executors, and tracked through JobHandler objects
    keyed by job id.
    """

    # __metaclass__ = ABCMeta
    # Shared module-level MongoDB client.
    global _mongoclient

    def __init__(self):
        """Build the scheduler, register event listeners and start it."""
        self.jobstores = {
            'mongo':
            MongoDBJobStore(collection='job1',
                            database='saasjob',
                            client=_mongoclient),
            'default':
            MemoryJobStore()
        }
        self.executors = {
            'default': ThreadPoolExecutor(1),
            'processpool': ProcessPoolExecutor(1)
        }
        self.job_defaults = {
            'coalesce': False,
            'misfire_grace_time': 1,
            'max_instances': 1
        }
        self._sched = BackgroundScheduler(jobstores=self.jobstores,
                                          executors=self.executors,
                                          job_defaults=self.job_defaults)
        # Listen for job-submitted events.
        self._sched.add_listener(self.when_job_submitted, EVENT_JOB_SUBMITTED)
        # Listen for job-finished events.
        self._sched.add_listener(self.when_job_executed, EVENT_JOB_EXECUTED)
        # Listen for job-crashed (exception) events.
        self._sched.add_listener(self.when_job_crashed, EVENT_JOB_ERROR)
        self._jobs = {}
        self._jobhandlers = {}  # format, key: jobid,  value: jobhandler
        self._jobs_key = ["name", "func", "args", "kwargs"]
        self.start()

    def cmd_valid(self, cmd):
        """Only command lines that invoke python are accepted."""
        cmd = cmd.strip()
        if cmd.startswith("python"):
            return True
        else:
            return False

    def get_job_trigger(self, _job):
        """Return the trigger's non-default fields as {name: str_value}."""
        # e.g. ('trigger', <CronTrigger (second='4', timezone='Asia/Shanghai')>)
        _trigger = self._get_job_attr(_job, "trigger")
        if _trigger:
            return dict([(f.name, f.__str__()) for f in _trigger.fields
                         if not f.is_default])
        else:
            return {}

    # Read a job attribute defensively.
    def _get_job_attr(self, _job, attr):
        """Return getattr(_job, attr), or None when the lookup fails."""
        try:
            # getattr replaces the original eval("_job.%s" % attr):
            # same result for plain attribute names, without eval().
            return getattr(_job, attr)
        except Exception:
            import traceback
            traceback.print_exc()
            return None

    def when_job_submitted(self, event):
        """Scheduler listener: a job was submitted to its executor."""
        try:
            job_id = event.job_id
            # Build the handler lazily from the cached job. The original
            # condition ("not in _jobhandlers and in _jobhandlers") was
            # always False; it clearly meant to consult self._jobs.
            if job_id not in self._jobhandlers and job_id in self._jobs:
                self._jobhandlers.setdefault(job_id,
                                             JobHandler(self._jobs[job_id]))
            jobhandler = self._jobhandlers[event.job_id]
            jobhandler.when_job_submitted()
            print("%s submitted at %s" %
                  (event.job_id,
                   time.strftime("%Y-%m-%d %H:%M:%S",
                                 time.localtime(time.time()))))
        except Exception:
            import traceback
            traceback.print_exc()

    def when_job_executed(self, event):
        """Scheduler listener: a job finished successfully."""
        try:
            job_id = event.job_id
            if job_id not in self._jobhandlers:
                self._jobhandlers.setdefault(job_id,
                                             JobHandler(self._jobs[job_id]))
            jobhandler = self._jobhandlers[event.job_id]
            jobhandler.when_job_executed()
            print("%s executed at %s" %
                  (event.job_id,
                   time.strftime("%Y-%m-%d %H:%M:%S",
                                 time.localtime(time.time()))))
        except Exception:
            import traceback
            traceback.print_exc()

    def when_job_crashed(self, event):
        """Scheduler listener: a job raised an exception."""
        try:
            if event.exception:
                job_id = event.job_id
                if job_id not in self._jobhandlers:
                    self._jobhandlers.setdefault(
                        job_id, JobHandler(self._jobs[job_id]))
                jobhandler = self._jobhandlers[event.job_id]
                jobhandler.when_job_crashed()
                print("%s crashed at %s" %
                      (event.job_id,
                       time.strftime("%Y-%m-%d %H:%M:%S",
                                     time.localtime(time.time()))))
        except Exception:
            import traceback
            traceback.print_exc()

    # Add a recurring job, crontab style.
    def addCron(self, cmd, **params):
        """Schedule ``cmd`` using the cron-style fields in ``params``.

        Returns {"job_id": ...} on success, {"errinfo": ...} for an
        invalid command, or False on unexpected errors.
        """
        try:
            create_jobid = uuid.uuid4().hex
            if not self.cmd_valid(cmd):
                return {"errinfo": "wrong cmd"}
            jobcmdobj = JobCmd(cmd)
            data = params.get("data", {})
            jobcmdobj.set_jobid(create_jobid)
            # "*" means "any value" and maps to None for APScheduler.
            s = params.get("second",
                           None) if params.get("second", None) != "*" else None
            m = params.get("minute",
                           None) if params.get("minute", None) != "*" else None
            h = params.get("hour",
                           None) if params.get("hour", None) != "*" else None
            d = params.get("day",
                           None) if params.get("day", None) != "*" else None
            dw = params.get(
                "day_of_week",
                None) if params.get("day_of_week", None) != "*" else None
            mnth = params.get(
                "month", None) if params.get("month", None) != "*" else None
            y = params.get("year",
                           None) if params.get("year", None) != "*" else None
            _job = self._sched.add_job(jobcmdcallable,
                                       'cron',
                                       year=y,
                                       month=mnth,
                                       day=d,
                                       day_of_week=dw,
                                       hour=h,
                                       minute=m,
                                       second=s,
                                       args=[jobcmdobj, data],
                                       executor="processpool",
                                       jobstore="mongo",
                                       id=create_jobid)
            self._jobhandlers.setdefault(create_jobid, JobHandler(_job))
            # Persist the job attributes.
            return {"job_id": create_jobid}
        except Exception:
            import traceback
            traceback.print_exc()
            print(cmd, params)
            return False

    # Modify job attributes.
    def modifyJobAttr(self, job_id, **changes):
        """Apply ``changes`` to the job and refresh the local caches."""
        try:
            _job = self._sched.modify_job(job_id=job_id, **changes)
            self._jobs[job_id] = _job
            if job_id in self._jobhandlers:
                self._jobhandlers[job_id].job = _job
            else:
                self._jobhandlers.setdefault(job_id, JobHandler(_job))
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id, changes)
            return False

    def modifyJobData(self, job_id, data):
        """Merge ``data`` into the job's second positional argument."""
        try:
            args = self._get_job_attr(self._jobhandlers[job_id].job, "args")
            for key in data:
                args[1][key] = data[key]
            _job = self._sched.modify_job(job_id, args=args)
            self._jobs[job_id] = _job
            if job_id in self._jobhandlers:
                self._jobhandlers[job_id].job = _job
            else:
                self._jobhandlers.setdefault(job_id, JobHandler(_job))
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id, data)
            return False

    # Change the schedule, crontab style.
    def modifyJobFreq(self, job_id, cronargs):
        """Reschedule the job with new cron trigger arguments."""
        try:
            _job = self._sched.reschedule_job(job_id,
                                              trigger='cron',
                                              **cronargs)
            self._jobs[job_id] = _job
            if job_id in self._jobhandlers:
                self._jobhandlers[job_id].job = _job
            else:
                self._jobhandlers.setdefault(job_id, JobHandler(_job))
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id, cronargs)
            return False

    # Delete a job.
    def removeFromCron(self, job_id):
        """Remove the job from the scheduler and the local caches."""
        try:
            self._sched.remove_job(job_id)
            if job_id in self._jobhandlers:
                self._jobhandlers.pop(job_id)
            if job_id in self._jobs:
                self._jobs.pop(job_id)
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            print(job_id)
            return False

    def job_exists(self, job_id):
        """True when the job is known; repairs whichever cache lags."""
        if job_id in self._jobhandlers or job_id in self._jobs:
            if job_id not in self._jobhandlers and job_id in self._jobs:
                self._jobhandlers[job_id] = JobHandler(self._jobs[job_id])
            elif job_id in self._jobhandlers and job_id not in self._jobs:
                self._jobs[job_id] = self._jobhandlers[job_id].job
            return True
        else:
            return False

    # Query job information by job id.
    def findCronJob(self, job_ids):
        """Return a list of status dicts, one per id in ``job_ids``."""
        result = []
        _keys = [
            "cmd", "create_stamp", "is_running", "start_stamp", "hope_runtime",
            "is_success", "is_pause", "status", "name", "desc", "allowmodify"
        ]
        for job_id in job_ids:
            print("job_exists", self.job_exists(job_id))
            if self.job_exists(job_id):
                _jobhander = self._jobhandlers[job_id]
                job_info = _jobhander.jobhandlerattr
                cron_trigger = {}
                # cron_trigger = self.get_cron_trigger(_jobhander.job)
                tmp = {}
                tmp["job_id"] = job_id
                if job_info["is_running"]:
                    execute_time = time.time() - job_info["start_stamp"]
                    tmp["running_time"] = round(execute_time, 3)
                else:
                    tmp["running_time"] = round(job_info["hope_runtime"], 3)
                for key in _keys:
                    v = job_info.get(key, None)
                    if key == "is_running":
                        tmp["finished"] = False if job_info[
                            "is_running"] else True
                    else:
                        tmp[key] = v
                if tmp["finished"]:
                    tmp["completed_per"] = 1.0
                else:
                    tmp["completed_per"] = round(
                        tmp["running_time"] /
                        max([tmp["running_time"], tmp["hope_runtime"]]), 3)
                # Internal bookkeeping keys are not exposed to callers.
                tmp.pop("hope_runtime")
                tmp.pop("is_success")
                tmp.pop("is_pause")
                _result = dict(tmp, **cron_trigger)
                print("_result", _result)
                # Status 3 == paused (set in pause_job): report as idle.
                if _result["status"] == 3:
                    _result["completed_per"] = 0
                    _result["running_time"] = 0
                    _result["start_stamp"] = None
                result.append(_result)
            else:
                result.append({"job_id": job_id, "errinfo": "no exists"})
        return result

    def getAllJobInfo(self):
        """Return status info for every known job, or False on error."""
        try:
            result = self.findCronJob(
                set(self._jobhandlers.keys()) | set(self._jobs.keys()))
            return result
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def start_addition(self):
        """Cache jobs already present in the persistent job stores."""
        for _job in self._sched.get_jobs():
            job_id = self._get_job_attr(_job, "id")
            self._jobs.setdefault(job_id, _job)

    def start(self):
        """Start the scheduler and pick up persisted jobs."""
        try:
            self._sched.start()
            # Pause while persisted jobs are cached so none fire before
            # the local bookkeeping is ready.
            self._sched.pause()
            self.start_addition()
            self._sched.resume()
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def stop(self, iswait=True):
        """Shut the scheduler down; ``iswait`` waits for running jobs."""
        try:
            self._sched.shutdown(wait=iswait)
            self._jobhandlers.clear()
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def pause_job(self, job_id):
        """Pause one job and mark its handler paused (status 3)."""
        try:
            self._sched.pause_job(job_id=job_id)
            self._jobhandlers[job_id].ispause = True
            self._jobhandlers[job_id].status = 3
            self._jobhandlers[job_id].isrunning = False
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False

    def resume_job(self, job_id):
        """Resume a paused job and mark its handler active (status 1)."""
        try:
            self._sched.resume_job(job_id=job_id)
            self._jobhandlers[job_id].ispause = False
            self._jobhandlers[job_id].status = 1
            return True
        except Exception:
            import traceback
            traceback.print_exc()
            return False
class RecreateModel:
    """
    Author: Marten Bolin
    Date: 2017-11-22
    Last update:
    Purpose: Runs continuously, checking the database for new users,
    movies or ratings and rebuilding the recommendation model when any
    change is detected.
    """
    def __init__(self, daemon=False):
        """
        Author: Marten Bolin
        Date: 2017-11-22
        Last update:
        Purpose: The instantiation of the class, make sure to catch it in
        a variable so it can be terminated properly. Start the scheduler.
        :param daemon : Sets daemon, (Optional) Default is False
        """
        # Counts seen at the last rebuild; zeros force a rebuild on the
        # first run when the database is non-empty.
        self.number_of_users = 0
        self.number_of_movies = 0
        self.number_of_ratings = 0

        # Set up and start the scheduler. Passing daemon through the
        # constructor replaces the old direct write to the protected
        # _daemon attribute (the TODO noted elsewhere in this file).
        self.scheduled = BackgroundScheduler(daemon=daemon)
        self.scheduled.add_job(self._run, 'interval', seconds=2, id="2")
        self.scheduled.start()
        # Fire the first check immediately instead of waiting a full
        # interval.
        self.scheduled.modify_job(job_id="2", next_run_time=datetime.now())

    def _run(self):
        """
        Author: Marten Bolin
        Date: 2017-11-22
        Last update:
        Purpose: One scheduled tick: compare current database counts with
        the last-seen counts and rebuild the model on any change.
        """
        # Get current state
        current_number_of_users = len(RetrieveUser().retrieve_all_users())
        current_number_of_movies = len(RetrieveMovie().retrieve_movie())
        current_number_of_ratings = len(RetrieveRating().retrieve_ratings())
        # Checks ratings first since they are the most likely to change.
        if (self.number_of_ratings == current_number_of_ratings
                and self.number_of_users == current_number_of_users
                and self.number_of_movies == current_number_of_movies):
            print("Nothing new, no changes made.")
        else:
            print("Changes detected, adjusting model")
            CreateNewModel.create_new_model()
            # Re-query after the rebuild so rows added while the model
            # was being built count as already seen.
            self.number_of_users = len(RetrieveUser().retrieve_all_users())
            self.number_of_movies = len(RetrieveMovie().retrieve_movie())
            self.number_of_ratings = len(RetrieveRating().retrieve_ratings())

    def terminate(self):
        """
        Author: Marten Bolin
        Date: 2017-11-22
        Last update:
        Purpose: Terminates the process by shutting the scheduler down.
        """
        print("Shutting down update_recommendations..")
        self.scheduled.shutdown()
        print("Recreatemodel has been shut down.")
Beispiel #12
0
class CronManager:
    """Manage interval/date cron jobs on a BackgroundScheduler, with
    optional MongoDB persistence."""

    def __init__(self, use_mongo_db=True):
        self.scheduler = BackgroundScheduler(
            timezone=timezone('Asia/Shanghai'))
        self.scheduler.configure()

        if use_mongo_db:
            self.job_store = MongoDBJobStore(database='apscheduler',
                                             collection='cronJob',
                                             client=db)
            self.scheduler.add_jobstore(self.job_store)
            # Persisted jobs may already exist, so replace on add.
            self.is_replace_existing = True
        else:
            self.is_replace_existing = False

    def add_cron(self, cron_instance):
        """Schedule a Cron instance and return its job id.

        Supports 'interval' and 'date' triggers; raises TypeError or
        ValueError for invalid input and for the unsupported 'cron'
        trigger type.
        """
        if not isinstance(cron_instance, Cron):
            raise TypeError('please add correct cron!')
        if cron_instance.trigger_type == 'interval':
            seconds = cron_instance.trigger_args.get('seconds')
            if not isinstance(seconds,
                              int) and not common.can_convert_to_int(seconds):
                raise TypeError('please set correct time interval')
            seconds = int(seconds)
            if seconds <= 0:
                raise ValueError('please set interval > 0')
            # Quirk: schedule the bound method itself as func, no args.
            self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                seconds=seconds,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_cron_job_id(),
                max_instances=5,
                jitter=0)
        elif cron_instance.trigger_type == 'date':
            run_date = cron_instance.trigger_args.get('run_date')
            # TODO: validate the run_date type.
            self.scheduler.add_job(
                func=cron_instance.cron_mission,
                trigger=cron_instance.trigger_type,
                run_date=run_date,
                replace_existing=self.is_replace_existing,
                coalesce=True,
                id=cron_instance.get_cron_job_id())
        elif cron_instance.trigger_type == 'cron':
            raise TypeError('暂时不支持 trigger_type 等于 \'cron\'')

        return cron_instance.get_cron_job_id()

    def start(self, paused=False):
        """Start the scheduler; ``paused=True`` starts without firing."""
        self.scheduler.start(paused=paused)

    def pause_cron(self, cron_id=None, pause_all=False):
        """Pause one cron job, or the whole scheduler when ``pause_all``."""
        if pause_all:
            self.scheduler.pause()
        elif cron_id:
            self.scheduler.pause_job(job_id=cron_id)

    def resume_cron(self, cron_id=None, resume_all=False):
        """Resume one cron job, or the whole scheduler when ``resume_all``."""
        if resume_all:
            self.scheduler.resume()
        elif cron_id:
            self.scheduler.resume_job(job_id=cron_id)

    def del_cron(self, cron_id=None, del_all=False):
        """Delete one cron job, or every job when ``del_all``."""
        if del_all:
            self.scheduler.remove_all_jobs()
        elif cron_id:
            self.scheduler.remove_job(job_id=cron_id)

    def update_cron(self, cron_job_id, project_id, cron_info):
        """Update the trigger and payload of an existing cron job."""
        if not isinstance(cron_job_id, str):
            raise TypeError('cron_id must be str')

        if not isinstance(project_id, str):
            raise TypeError('project_id must be str')

        if not isinstance(cron_info, dict):
            raise TypeError('cron_info must be dict')

        trigger_type = cron_info.get('triggerType')
        interval = cron_info.get('interval')
        run_date = cron_info.get('runDate')
        test_suite_id_list = cron_info.get('testSuiteIdList')
        include_forbidden = cron_info.get('includeForbidden')
        test_env_id = cron_info.get('testEnvId')
        alarm_mail_list = cron_info.get('alarmMailList')
        try:
            if trigger_type == 'interval' and int(interval) > 0:
                # int(interval): the value may arrive as a string and
                # IntervalTrigger requires a number (the validation above
                # already converted it, but the original passed the raw
                # value to the trigger).
                self.scheduler.modify_job(
                    job_id=cron_job_id,
                    trigger=IntervalTrigger(seconds=int(interval)))
            elif trigger_type == 'date':
                # TODO: validate the run_date type.
                self.scheduler.modify_job(
                    job_id=cron_job_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('更新定时任务触发器失败!')
            # trigger_type / run_date / seconds below only refresh the
            # displayed fields; they do not change the actual trigger.
            if run_date:
                cron = Cron(
                    test_suite_id_list=test_suite_id_list,
                    project_id=project_id,
                    include_forbidden=include_forbidden,
                    test_env_id=test_env_id,
                    alarm_mail_list=alarm_mail_list,
                    trigger_type=trigger_type,
                    run_date=run_date)
            else:
                cron = Cron(
                    test_suite_id_list=test_suite_id_list,
                    project_id=project_id,
                    include_forbidden=include_forbidden,
                    test_env_id=test_env_id,
                    alarm_mail_list=alarm_mail_list,
                    trigger_type=trigger_type,
                    seconds=interval)
            # Quirk: when modifying a job, update args, not func.
            self.scheduler.modify_job(job_id=cron_job_id,
                                      coalesce=True,
                                      args=[cron])
        except BaseException as e:
            raise TypeError('更新定时任务失败: %s' % e)

    def shutdown(self, force_shutdown=False):
        """Shut down; force skips waiting for running jobs."""
        if force_shutdown:
            self.scheduler.shutdown(wait=False)
        else:
            self.scheduler.shutdown(wait=True)

    def get_jobs(self):
        """Return all scheduled jobs."""
        return self.scheduler.get_jobs()
Beispiel #13
0
def cook_report_uuid(scheduler: BackgroundScheduler,
                     report_uuid: str,
                     action="add") -> None:
    """Create, update, or delete the scheduled job for one report.

    :param scheduler: scheduler that owns the report jobs
    :param report_uuid: report identifier, also used as the APScheduler job id
    :param action: one of "add" / "mod" / "del"
    :return: None
    :raises ValueError: if *action* is not one of the accepted values
    """
    print(
        f"cook report uuid, scheduler: {scheduler}, report_uuid: {report_uuid}, "
        f"action: {action}")
    if action == "del":
        # A deletion needs no report data: remove the job and stop here.
        # (The original fell through and ended up raising ValueError.)
        scheduler.remove_job(job_id=report_uuid)
        return

    ok, report_info = storage.get_report_info(report_uuid)
    print(f"report_info: {report_info}")
    if not ok:
        print(f"Fail to get report info for uuid: {report_uuid}, "
              f"errmsg: {report_info}")
        return

    ok, page_info = storage.get_report_detail_info_by_id(report_uuid)
    if not ok:
        print(f"Fail to get report detail info for uuid: {report_uuid}, "
              f"errmsg: {page_info}")
        return

    # week/month
    sensor_group_ids = report_info["sensor_group_ids"]

    # Resolve the sensor ids and the sensor -> group mapping for the report.
    ok, sensor_id_group_mapping = storage.get_group_sensor_map(
        sensor_group_ids)
    if not ok:
        print(f"fail to get sensor_groups by id: {sensor_group_ids}")
        return

    sensors = list(sensor_id_group_mapping.keys())

    # "send_date" is stored as a crontab expression, e.g. "0 9 * * 1".
    cron = CronTrigger.from_crontab(report_info["send_date"])
    _report_params = {
        "report_name": report_info["report_name"],
        "report_format": report_info["report_format"],
        "timed": report_info["timed"],
        "data_scope": report_info["data_scope"],
        "sensor_ids": sensors,
        "sensor_id_group_mapping": sensor_id_group_mapping,
        "page_info": page_info,
        "send_mail": report_info["send_email"],
    }

    if action == "mod":
        scheduler.modify_job(job_id=report_uuid,
                             trigger=cron,
                             kwargs=_report_params)

    elif action == "add":
        # Generate one report immediately, then schedule the recurring job.
        gen_report(**_report_params)
        scheduler.add_job(func=gen_report,
                          trigger=cron,
                          kwargs=_report_params,
                          id=report_uuid)

    else:
        raise ValueError("action must in ('add', 'del', 'mod')")
Beispiel #14
0
class MainRunner(object):
    """Watch a config folder and run one scheduled fuzzy-testing runner per
    JSON file found there (created/modified/deleted files are tracked live).
    """

    # config file path -> runner instance
    workers = {}
    dirpath = "."
    defaultOutputPath = "output"

    def __init__(self, dirpath="."):
        """
        :param dirpath: local folder to load runner JSON configs from;
            falls back to "." when the path is not a directory.
        """
        logger.info("Initialing Main Runner for Fuzzy Testing")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        # Use the validated path, not the raw argument (which may be invalid).
        self.load_dir(self.dirpath)

        # Watch the folder so config changes are picked up at runtime.
        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        """Load every JSON config file directly inside *folder*."""
        # Python 3: os.walk returns a generator; use next(), not .next().
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if fname.endswith("json"):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        """
        Given a json file, load and create a task run regularly.

        :param fp: path of the JSON config file
        :return: the runner instance, or None on any load failure
        """
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data["path"] = fp
            except ValueError as e:
                # Python 3: exceptions have no .message attribute.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
            # Scheduling interval in seconds (default 30).
            interval = 30
            if "interval" in data:
                interval = int(data["interval"])

            if "output" in data:
                if "defaultOutputPath" in data["output"]:
                    self.defaultOutputPath = data["output"]["defaultOutputPath"]
                if "dirpath" in data["output"]:
                    data["output"]["outputPath"] = os.path.join(self.defaultOutputPath, data["output"]["dirpath"])
            else:
                data["output"] = {"outputPath": self.defaultOutputPath}

            if "type" not in data:
                logger.error(
                    "Missing type attribute in \
                                your configuration file [%s]"
                    % fp
                )
                return None

            if fp in self.workers:  # existing runner found
                logger.info("Update existing runner [%s]" % fp)
                runner = self.workers[fp]
                runner.update(**data)
                # memo: Interval can't be modified
                self.scheduler.modify_job(job_id=fp, func=runner.run, name=runner.name)

            else:  # Create new
                logger.info("Create new runner [%s]" % fp)
                # "type" holds a dotted path: module part + class/object name.
                module_path = data["type"][: data["type"].rfind(".")]
                object_name = data["type"][data["type"].rfind(".") + 1 :]
                try:
                    runner_module = getattr(importlib.import_module(module_path), object_name)
                except Exception as e:
                    logger.exception(e)
                    return None

                runner = runner_module(**data)
                self.workers[fp] = runner
                self.scheduler.add_job(runner.run, "interval", id=fp, name=runner.name, seconds=interval)
            return runner
        return None

    def list(self):
        """
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        """
        # Iterate items(); iterating the dict alone yields only keys.
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        """
        Given a file path, stop the running instance if possible.

        :return: True when a worker was removed, False otherwise
        """
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        """
        TODO:
        1. remove by start, end
        2. by directory(?)
        """
        pass

    def unload_all(self):
        """
        stop all running instances
        """
        self.scheduler.shutdown()

    def pause(self, fp):
        """
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        """
        # BackgroundScheduler.pause() pauses the whole scheduler and takes no
        # job_id; pausing a single job is pause_job().
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        """Return the runner configuration (currently always empty)."""
        conf = {}
        return conf

    def _wake(self):
        """
        For periodical minions, waking them according to timing
        """
        pass
Beispiel #15
0
class Boss(object):
    """Watch a config folder and schedule one data-collecting "minion" per
    JSON file found there; collected output goes under *output*.
    """

    # config file path -> minion instance
    workers = {}
    dirpath = '.'
    output = None

    def __init__(self, dirpath='.', output='output'):
        '''
        :param dirpath: local folder to load minion JSON configs from;
            falls back to "." when the path is not a directory.
        :param output: base output folder, created when missing.
        '''
        logger.info("Initialing BOSS")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading job config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")
        self.output = output
        logger.info("Setup output folder: " + output)
        if not os.path.isdir(output):
            logger.info("target directory "
                        + output
                        + " doesn't exist, creating..")
            os.makedirs(output)

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

        # Use the validated path, not the raw argument (which may be invalid).
        self.load_dir(self.dirpath)

        # Watch the folder so config changes are picked up at runtime.
        event_handler = JsonHandler(patterns=["*.json"],
                                    ignore_directories=True)
        event_handler.set_handler(oncreated=self.load,
                                  onmodified=self.load,
                                  ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        '''Load every JSON config file directly inside *folder*.'''
        # Python 3: os.walk returns a generator; use next(), not .next().
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if fname.endswith('json'):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        Given a json file, load and create a task run regularly.

        :param fp: path of the JSON config file
        :return: the minion instance, or None on any load failure
        '''
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['path'] = fp
            except ValueError as e:
                # Python 3: exceptions have no .message attribute.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
            # Scheduling interval in seconds (default 30).
            interval = 30
            if 'interval' in data:
                interval = int(data['interval'])
            if self.output:
                # TODO: test case for no 'output' key in data
                if 'output' not in data:
                    data['output'] = {}
                output = data['output']
                if 'dirpath' in output:
                    output['dirpath'] = os.path.join(self.output, output['dirpath'])
                else:
                    output['dirpath'] = self.output
            # The 'type' check applies to every config, so it must not be
            # nested under the output handling (it was skipped whenever
            # self.output was falsy).
            if 'type' not in data:
                logger.error("Missing type attribute in \
                                your configuration file [%s]" % fp)
                return None
            if fp in self.workers:  # existing minion found
                logger.info("Update existing minion [%s]" % fp)
                minion = self.workers[fp]
                minion.update(**data)
                # memo: Interval can't be modified
                self.scheduler.modify_job(job_id=fp,
                                          func=minion.collect,
                                          name=minion.name+'_'+minion.serial
                                          )

            else:  # Create new
                logger.info("Create new minion [%s]" % fp)
                # "type" holds a dotted path: module part + class/object name.
                module_path = data['type'][:data['type'].rfind(".")]
                object_name = data['type'][data['type'].rfind(".") + 1:]
                try:
                    minion_module = getattr(importlib.import_module(
                                            module_path), object_name)
                except Exception as e:
                    logger.exception(e)
                    return None

                minion = minion_module(**data)
                self.workers[fp] = minion
                self.scheduler.add_job(minion.collect, 'interval',
                                       id=fp,
                                       name=minion.name + '_' + minion.serial,
                                       seconds=interval
                                       )
            return minion
        return None

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        # Iterate items(); iterating the dict alone yields only keys.
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        Given a file path, stop the running instance if possible.

        :return: True when a worker was removed, False otherwise
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BackgroundScheduler.pause() pauses the whole scheduler and takes no
        # job_id; pausing a single job is pause_job().
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        '''Return the boss configuration (currently always empty).'''
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
Beispiel #16
0
class SchedulerService(rpyc.Service):
    """RPyC service that exposes a BackgroundScheduler's job API over RPC.

    Every ``exposed_*`` method is remotely callable and delegates directly
    to the underlying scheduler.
    """

    def __init__(self, **config):
        # Build, configure, and immediately start the scheduler thread.
        self._scheduler = BackgroundScheduler()
        self._scheduler.configure(**config)
        self._scheduler.start()
        self.logger = logging.getLogger("Heartbeat.core")
        self.logger.info("Heartbeat Core Initialized")  # fixed typo: "Initalized"

    def on_connect(self, conn):
        # code that runs when a connection is created
        # (to init the service, if needed)
        self.logger.info("----------Begin New Client----------")
        self.logger.info(conn)
        self.logger.info("----------End New Client----------")

    def on_disconnect(self, conn):
        # code that runs after the connection has already closed
        # (to finalize the service, if needed)
        self.logger.info("----------Begin Goodbye Client----------")
        self.logger.info(conn)
        self.logger.info("----------End Goodbye Client----------")

    def exposed_add_job(self, func, *args, **kwargs):
        """Add a job; forwards *func* and APScheduler add_job arguments."""
        self.logger.info("----------Begin New Job----------")
        self.logger.info("Function: %s", str(func))
        self.logger.info("*args: %s", str(args))
        self.logger.info("**kwargs: %s", str(dict(kwargs)))
        self.logger.info("----------End New Job----------")  # fixed typo: "Eng"
        return self._scheduler.add_job(func, *args, **kwargs)

    def exposed_modify_job(self, job_id, jobstore=None, **changes):
        """Modify attributes of an existing job in place."""
        return self._scheduler.modify_job(job_id, jobstore, **changes)

    def exposed_reschedule_job(self,
                               job_id,
                               jobstore=None,
                               trigger=None,
                               **trigger_args):
        """Replace a job's trigger, recomputing its next run time."""
        return self._scheduler.reschedule_job(job_id, jobstore, trigger,
                                              **trigger_args)

    def exposed_pause_job(self, job_id, jobstore=None):
        """Pause a single job until it is resumed."""
        return self._scheduler.pause_job(job_id, jobstore)

    def exposed_resume_job(self, job_id, jobstore=None):
        """Resume a previously paused job."""
        return self._scheduler.resume_job(job_id, jobstore)

    def exposed_remove_job(self, job_id, jobstore=None):
        """Delete a job from the scheduler."""
        self._scheduler.remove_job(job_id, jobstore)

    def exposed_get_job(self, job_id, jobstore=None):
        """Return a single job by id, or None when it does not exist."""
        return self._scheduler.get_job(job_id, jobstore=jobstore)

    def exposed_get_jobs(self, jobstore=None):
        """Return all jobs, optionally restricted to one jobstore."""
        results = self._scheduler.get_jobs(jobstore)
        return results

    def exposed_get_tasks(self):
        """Return a list of schedule-able functions.

        Scans the sibling ``task`` package for modules declaring ``__all__``
        and formats each callable as ``module:function(signature)``.
        """
        tasks = []
        task_dir = os.path.join(os.path.dirname(__file__), "task")
        for module_file in os.listdir(task_dir):
            # Skip the package initializer and anything that is not a .py file.
            if module_file == "__init__.py" or module_file[-3:] != ".py":
                continue
            module_name = "server.task.{}".format(module_file[:-3])
            module = importlib.import_module(module_name)
            # Only modules that declare an explicit public API are offered.
            if not hasattr(module, "__all__"):
                continue
            for function_name in module.__all__:
                function = getattr(module, function_name)
                if not callable(function):
                    continue
                parameters = inspect.signature(function).parameters
                parameters_str = ", ".join(
                    [str(val) for key, val in parameters.items()])
                tasks.append("{}:{}({})".format(module_name, function_name,
                                                parameters_str))
        return tasks
Beispiel #17
0
class CronManager:
    """Manage APScheduler cron jobs, optionally persisted in MongoDB."""

    def __init__(self, use_mongo_db=True):
        # All jobs run in the Asia/Shanghai timezone.
        self.scheduler = BackgroundScheduler(
            timezone=timezone('Asia/Shanghai'))
        self.scheduler.configure()

        if use_mongo_db:
            # Persist jobs so they survive process restarts; persisted jobs
            # with the same id must then be replaced rather than duplicated.
            self.job_store = MongoDBJobStore(database='apscheduler',
                                             collection='cronJob',
                                             client=db)
            self.scheduler.add_jobstore(self.job_store)
            self.is_replace_existing = True
        else:
            self.is_replace_existing = False

    def add_cron(self, cron_instance):
        """Schedule *cron_instance* and return its job id.

        Supports 'interval' and 'date' trigger types; 'cron' is rejected.
        Errors are logged (not re-raised), in which case None is returned.
        """
        try:
            if not isinstance(cron_instance, Cron):
                raise TypeError('please add correct cron!')
            if cron_instance.trigger_type == 'interval':
                seconds = cron_instance.trigger_args.get('seconds')
                # 'seconds' may arrive as a numeric string; validate then cast.
                if not isinstance(
                        seconds,
                        int) and not common.can_convert_to_int(seconds):
                    raise TypeError('please set correct time interval')
                seconds = int(seconds)
                if seconds <= 0:
                    raise ValueError('please set interval > 0')
                self.scheduler.add_job(
                    func=cron_instance.cron_mission,
                    trigger=cron_instance.trigger_type,
                    seconds=seconds,
                    replace_existing=self.is_replace_existing,
                    coalesce=True,
                    id=cron_instance.get_cron_job_id(),
                    max_instances=5,
                    jitter=0)
            elif cron_instance.trigger_type == 'date':
                run_date = cron_instance.trigger_args.get('run_date')
                # NOTE(review): run_date's type/format is not validated here --
                # TODO confirm callers supply an APScheduler-compatible value.
                self.scheduler.add_job(
                    func=cron_instance.cron_mission,
                    trigger=cron_instance.trigger_type,
                    run_date=run_date,
                    replace_existing=self.is_replace_existing,
                    coalesce=True,
                    id=cron_instance.get_cron_job_id())
            elif cron_instance.trigger_type == 'cron':
                raise TypeError('暂时不支持 trigger_type 等于 \'cron\'')
            # NOTE(review): an unrecognized trigger_type falls through and
            # still returns a job id although nothing was scheduled.
            return cron_instance.get_cron_job_id()
        except BaseException as e:
            with app.app_context():
                current_app.logger.error("add_cron failed. - %s" % str(e))

    def start(self, paused=False):
        """Start the scheduler; when *paused* is True jobs stay dormant."""
        self.scheduler.start(paused=paused)

    def pause_cron(self, cron_id=None, pause_all=False):
        """Pause one job by id, or the whole scheduler when *pause_all*."""
        if pause_all:
            self.scheduler.pause()
        elif cron_id:
            self.scheduler.pause_job(job_id=cron_id)

    def resume_cron(self, cron_id=None, resume_all=False):
        """Resume one job by id, or the whole scheduler when *resume_all*."""
        if resume_all:
            self.scheduler.resume()
        elif cron_id:
            self.scheduler.resume_job(job_id=cron_id)

    def del_cron(self, cron_id=None, del_all=False):
        """Remove one job by id, or every job when *del_all*."""
        if del_all:
            self.scheduler.remove_all_jobs()
        elif cron_id:
            self.scheduler.remove_job(job_id=cron_id)

    def update_cron(self, cron_job_id, project_id, cron_info):
        """Re-trigger an existing job and refresh its Cron payload.

        :param cron_job_id: APScheduler job id of the cron to update
        :param project_id: owning project id
        :param cron_info: dict of camelCase fields describing the new config
        Errors are logged rather than re-raised.
        """
        try:
            if not isinstance(cron_job_id, str):
                raise TypeError('cron_id must be str')

            if not isinstance(project_id, str):
                raise TypeError('project_id must be str')

            if not isinstance(cron_info, dict):
                raise TypeError('cron_info must be dict')

            trigger_type = cron_info.get('triggerType')
            interval = cron_info.get('interval')
            run_date = cron_info.get('runDate')
            test_suite_id_list = cron_info.get('testSuiteIdList')
            include_forbidden = cron_info.get('includeForbidden')
            test_env_id = cron_info.get('testEnvId')
            always_send_mail = cron_info.get('alwaysSendMail')
            alarm_mail_group_list = cron_info.get('alarmMailGroupList')
            enable_wxwork_notify = cron_info.get('enableWXWorkNotify')
            wxwork_api_key = cron_info.get('WXWorkAPIKey')
            wxwork_mention_mobile_list = cron_info.get(
                'WXWorkMentionMobileList')
            always_wxwork_notify = cron_info.get('alwaysWXWorkNotify')
            enable_ding_talk_notify = cron_info.get('enableDingTalkNotify')
            ding_talk_access_token = cron_info.get('DingTalkAccessToken')
            ding_talk_at_mobiles = cron_info.get('DingTalkAtMobiles')
            ding_talk_secret = cron_info.get('DingTalkSecret')
            always_ding_talk_notify = cron_info.get('alwaysDingTalkNotify')

            if trigger_type == 'interval' and int(interval) > 0:
                # Cast explicitly: interval may arrive as a numeric string,
                # and IntervalTrigger needs an int for seconds.
                self.scheduler.modify_job(
                    job_id=cron_job_id,
                    trigger=IntervalTrigger(seconds=int(interval)))
            elif trigger_type == 'date':
                self.scheduler.modify_job(
                    job_id=cron_job_id, trigger=DateTrigger(run_date=run_date))
            else:
                raise TypeError('更新定时任务触发器失败!')
            if run_date:
                cron = Cron(
                    cron_job_id=cron_job_id,
                    test_suite_id_list=test_suite_id_list,
                    project_id=project_id,
                    test_env_id=test_env_id,
                    include_forbidden=include_forbidden,
                    enable_wxwork_notify=enable_wxwork_notify,
                    wxwork_api_key=wxwork_api_key,
                    wxwork_mention_mobile_list=wxwork_mention_mobile_list,
                    always_wxwork_notify=always_wxwork_notify,
                    enable_ding_talk_notify=enable_ding_talk_notify,
                    ding_talk_access_token=ding_talk_access_token,
                    ding_talk_at_mobiles=ding_talk_at_mobiles,
                    ding_talk_secret=ding_talk_secret,
                    always_ding_talk_notify=always_ding_talk_notify,
                    always_send_mail=always_send_mail,
                    alarm_mail_group_list=alarm_mail_group_list,
                    trigger_type=trigger_type,  # display-only here; the real trigger was set above
                    run_date=run_date)  # display-only here
            else:
                cron = Cron(
                    cron_job_id=cron_job_id,
                    test_suite_id_list=test_suite_id_list,
                    project_id=project_id,
                    include_forbidden=include_forbidden,
                    enable_wxwork_notify=enable_wxwork_notify,
                    wxwork_api_key=wxwork_api_key,
                    wxwork_mention_mobile_list=wxwork_mention_mobile_list,
                    always_wxwork_notify=always_wxwork_notify,
                    enable_ding_talk_notify=enable_ding_talk_notify,
                    ding_talk_access_token=ding_talk_access_token,
                    ding_talk_at_mobiles=ding_talk_at_mobiles,
                    ding_talk_secret=ding_talk_secret,
                    always_ding_talk_notify=always_ding_talk_notify,
                    test_env_id=test_env_id,
                    always_send_mail=always_send_mail,
                    alarm_mail_group_list=alarm_mail_group_list,
                    trigger_type=trigger_type,  # display-only here; the real trigger was set above
                    seconds=interval)  # display-only here
            # Quirk: when updating the job we must replace args, not func.
            self.scheduler.modify_job(job_id=cron_job_id,
                                      coalesce=True,
                                      args=[cron])
        except BaseException as e:
            with app.app_context():
                current_app.logger.error("update_cron failed. - %s" % str(e))

    def shutdown(self, force_shutdown=False):
        """Shut the scheduler down; force skips waiting for running jobs."""
        if force_shutdown:
            self.scheduler.shutdown(wait=False)
        else:
            self.scheduler.shutdown(wait=True)

    def get_jobs(self):
        """Return every job currently registered with the scheduler."""
        return self.scheduler.get_jobs()
Beispiel #18
0
class ExScheduler(object):
    """Thin wrapper around a BackgroundScheduler that additionally keeps a
    per-job status map (pending/scheduling/paused/...) for reporting."""

    def __init__(self):
        self.__scheduler = BackgroundScheduler()
        # job id -> JobStatus value
        self.__jobstatuslist = {}

    def start_scheduler(self, **options):
        """Apply *options* to the scheduler, then start it."""
        self.__scheduler.configure(**options)
        self.__scheduler.start()

    def shutdownscheduler(self):
        """Stop the scheduler without waiting for running jobs."""
        self.__scheduler.shutdown(wait=False)

    def getjob(self, jobid):
        """Look up one job by id."""
        return self.__scheduler.get_job(jobid)

    def scheduledjob(self, *args, **kw):
        """Pass-through to the scheduler's scheduled_job decorator."""
        return self.__scheduler.scheduled_job(*args, **kw)

    def addjob(self, *args, **kw):
        """Add a job and wake the scheduler so it is picked up promptly."""
        created = self.__scheduler.add_job(*args, **kw)
        self.__scheduler.wakeup()
        return created

    def removejob(self, job_id, jobstore=None):
        """Delete a job from the scheduler."""
        self.__scheduler.remove_job(job_id=job_id, jobstore=jobstore)

    def pausejob(self, job_id, jobstore=None):
        """Pause a job and record its PAUSED status."""
        self.__scheduler.pause_job(job_id=job_id, jobstore=jobstore)
        self.setjobstatus(job_id, JobStatus.PAUSED)

    def resumejob(self, job_id, jobstore=None):
        """Resume a paused job and record its SCHEDULING status."""
        self.__scheduler.resume_job(job_id=job_id, jobstore=jobstore)
        self.setjobstatus(job_id, JobStatus.SCHEDULING)

    def modifyjob(self, job_id, jobstore=None, **kw):
        """Change a job's attributes in place; return the updated job."""
        return self.__scheduler.modify_job(job_id=job_id,
                                           jobstore=jobstore,
                                           **kw)

    def getjoblist(self):
        """Return an iterator of job-info dicts for every known job."""
        return map(self._getjobinfo, self.__scheduler.get_jobs())

    def addlistener(self, callback, mask=EVENT_ALL):
        """Subscribe *callback* to scheduler events matching *mask*."""
        self.__scheduler.add_listener(callback, mask)

    def setjobstatus(self, jobid, jobstatus):
        """Record *jobstatus* for *jobid* in the status map."""
        self.__jobstatuslist[jobid] = jobstatus

    def jobstatusinitial(self, job_id, jobstore=None):
        """Seed the status map for *job_id* when it has no entry yet."""
        job = self.__scheduler.get_job(job_id=job_id, jobstore=jobstore)
        if job_id in self.__jobstatuslist:
            return
        if not hasattr(job, 'next_run_time'):
            # Not yet committed to a jobstore.
            status = JobStatus.PENDING
        elif job.next_run_time:
            status = JobStatus.SCHEDULING
        else:
            # A paused job has next_run_time set to None.
            status = JobStatus.PAUSED
        self.setjobstatus(job_id, status)

    def _getjobinfo(self, job):
        """Build the serializable info dict for one job."""
        self.jobstatusinitial(job.id)
        job_key = str(job.id)
        status = self.__jobstatuslist[job_key]
        if status == JobStatus.SCHEDULING or status == JobStatus.RUNNING:
            next_run = datetime_repr(job.next_run_time)
        else:
            next_run = "--"
        return {
            "id": job_key,
            "name": str(job.name),
            "kwargs": job.kwargs,
            "trigger": trigger_str_to_dict(str(job.trigger)),
            "next_run_time": next_run,
            "status": status,
        }
Beispiel #19
0
class MainRunner(object):
    """Watch a config folder and run one scheduled Hasal-agent runner per
    JSON file found there (created/modified/deleted files are tracked live).
    """

    # config file path -> runner instance
    workers = {}
    dirpath = '.'
    defaultOutputPath = 'output'

    class NoRunningFilter(logging.Filter):
        """Suppress APScheduler's noisy "Execution ..." log records."""
        def filter(self, record):
            return not record.msg.startswith('Execution')

    def __init__(self, dirpath='.'):
        '''
        :param dirpath: local folder to load runner JSON configs from;
            falls back to "." when the path is not a directory.
        '''
        logger.info("Initialing Main Runner for Hasal agent")
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading runner config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.load_dir(self.dirpath)

        # Watch the folder so config changes are picked up at runtime.
        event_handler = JsonHandler(patterns=["*.json"], ignore_directories=True)
        event_handler.set_handler(oncreated=self.load, onmodified=self.load, ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

        my_filter = self.NoRunningFilter()
        logging.getLogger("apscheduler.scheduler").addFilter(my_filter)

    def load_dir(self, folder):
        '''Load every JSON config file directly inside *folder*.'''
        # Python 3: os.walk returns a generator; use next(), not .next().
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if fname.endswith('json'):
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        Given a json file, load and create a task run regularly.

        Jenkins may still be writing the file, so reading is retried for up
        to 10 seconds before giving up.

        :param fp: path of the JSON config file
        :return: the runner instance, or None on any load failure
        '''
        logger.info(fp + " was loaded!")
        with open("agent.log", 'w+') as f:
            f.write(fp + " was loaded!")
        data = {}
        loaded = False
        for _ in range(10):
            try:
                with open(fp) as in_data:
                    data = json.load(in_data)
                    # default will load JOB_NAME parameter in Jenkins created json file
                    data['name'] = data.get('JOB_NAME', "Jenkins Job")
                    data['path'] = fp
                    loaded = True
                    # Success: stop retrying (the original kept re-reading
                    # the file for all 10 iterations).
                    break
            except ValueError as e:
                # Python 3: exceptions have no .message attribute.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
            except Exception:
                logger.warning("File is not ready. Wait 1 second for another try.")
                time.sleep(1)

        if not loaded:
            logger.warning(fp + " is not ready for 10 seconds.")
            return None

        # load interval value from Jenkins created json file (default : 30 )
        interval = int(data.get('interval', 30))

        # load outputpath and defaultoutputpath from Jenkins created json file
        if 'output' in data:
            if 'defaultOutputPath' in data['output']:
                self.defaultOutputPath = data['output']['defaultOutputPath']
            if 'dirpath' in data['output']:
                data['output']['outputPath'] = os.path.join(self.defaultOutputPath, data['output']['dirpath'])
        else:
            data['output'] = {'outputPath': self.defaultOutputPath}

        if fp in self.workers:  # existing runner found
            logger.info("Update existing runner [%s]" % fp)
            runner = self.workers[fp]
            runner.update(**data)
            # memo: Interval can't be modified
            self.scheduler.modify_job(job_id=fp,
                                      func=runner.run,
                                      name=runner.name
                                      )

        else:  # Create new
            logger.info("Create new runner [%s]" % fp)
            module_path = data.get('AGENT_MODULE_PATH', "hasalTask")
            object_name = data.get('AGENT_OBJECT_NAME', "HasalTask")
            try:
                runner_module = getattr(importlib.import_module(
                                        module_path), object_name)
            except Exception as e:
                logger.exception(e)
                return None

            runner = runner_module(**data)
            self.workers[fp] = runner
            self.scheduler.add_job(runner.run, 'interval',
                                   id=fp,
                                   name=runner.name,
                                   seconds=interval
                                   )
        return runner

    def list(self):
        '''
        to list all configs loaded
        format: [squence number] [minion name] [config_path] [status]
        '''
        # Iterate items(); iterating the dict alone yields only keys.
        for (fp, worker) in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        Given a file path, stop the running instance if possible.

        :return: True when a worker was removed, False otherwise
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        # BackgroundScheduler.pause() pauses the whole scheduler and takes no
        # job_id; pausing a single job is pause_job().
        self.scheduler.pause_job(job_id=fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        self.unload_all()

    def get_config(self):
        '''Return the runner configuration (currently always empty).'''
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
Beispiel #20
0
class Scheduler:
    """Schedules crawler jobs in an elasticsearch-backed APScheduler.

    User-supplied job descriptions are validated with cerberus, turned
    into cron triggers via the predefined ``TRIGGERS`` table and stored
    in the "elastic" jobstore so they survive restarts.
    """
    TRIGGERS = {
        "trig_5minutes": {
            "id": "trig_5minutes",
            "name": "Every five minutes",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(minute="*/5"),
            "from_trigger": lambda trig: []
        },
        "trig_hourly": {
            "id": "trig_hourly",
            "name": "Each hour",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(hour="*"),
            "from_trigger": lambda trig: []
        },
        "trig_daily": {
            "id": "trig_daily",
            "name": "Each day",
            "options": [],
            "schema": {},
            "trigger_args": lambda args: dict(day="*"),
            "from_trigger": lambda trig: []
        },
        "trig_weekday": {
            "id":
            "trig_weekday",
            "name":
            "Each weekday",
            "options": [{
                "id": i,
                "name": el,
                "active": True
            } for i, el in enumerate("Mon Tue Wed Thu Fri Sat Sun".split())],
            "schema": {
                "id": {
                    "type": "integer",
                    "coerce": int,
                    "min": 0,
                    "max": 6
                },
                "name": {
                    "type": "string",
                    "required": False
                },
                "active": {
                    "type": "boolean",
                    "coerce": utility.coerce_bool,
                    "required": True
                }
            },
            "trigger_args":
            lambda args: dict(day_of_week=",".join(str(a) for a in args)),
            "from_trigger":
            lambda trig: [int(d) for d in str(trig.fields[4]).split(",")]
        },
        "trig_monthly": {
            "id":
            "trig_monthly",
            "name":
            "Each month",
            "options": [{
                "id": i + 1,
                "name": el,
                "active": True
            } for i, el in enumerate(("Jan Feb Mar Apr May Jun "
                                      "Jul Aug Sep Oct Nov Dec").split())],
            "schema": {
                "id": {
                    "type": "integer",
                    "coerce": int,
                    "min": 0,
                    "max": 12
                },
                "name": {
                    "type": "string",
                    "required": False
                },
                "active": {
                    "type": "boolean",
                    "coerce": utility.coerce_bool,
                    "required": True
                }
            },
            "trigger_args":
            lambda args: dict(month=",".join(str(a) for a in args)),
            "from_trigger":
            lambda trig: [int(d) for d in str(trig.fields[1]).split(",")]
        },
    }
    """Predefined triggers and their argument checks."""
    def __init__(self,
                 elastic,
                 crawler_dir="crawlers",
                 crawler_args=None,
                 **cron_defaults):
        """Initializes the scheduler by binding it to its elasticsearch db.

        Args:
            elastic (elasticsearch.Elasticsearch): the es-client to save the
                crawling jobs in.
            crawler_dir (str): the directory where the crawlers will be
                found. Defaults to "crawlers".
            crawler_args (dict): keyword arguments passed to the jobstore
                for crawler construction. Defaults to None, treated as {}.
            **cron_defaults (dict): default keyword arguments for the
                cron triggers.

        Returns:
            Scheduler: a fresh Scheduler instance.
        """
        # A literal {} default would be shared across every Scheduler
        # instance (mutable-default pitfall); normalize None -> fresh dict.
        crawler_args = {} if crawler_args is None else crawler_args
        # NOTE(review): crawler_dir is currently unused here;
        # _detect_crawlers() takes no argument - confirm whether it should
        # receive this path.
        jobstores = {
            "default": {
                "type": "memory"
            },
            "elastic": InjectorJobStore(kwargs=crawler_args, client=elastic)
        }

        executors = {
            "default": ThreadPoolExecutor(10),
            "processpool": ProcessPoolExecutor(10)
        }

        job_defaults = {
            "misfire_grace_time": 5 * 60,  # 5min
            "coalesce": True,
        }

        self.cron_defaults = utility.DefaultDict(
            {
                # standard is every day at 00:00:00
                "hour": 0,
                "minute": 0,
                "second": 0
            },
            **cron_defaults)

        self.scheduler = BackgroundScheduler(jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults,
                                             timezone=utc)

        self.crawlers = _detect_crawlers()
        # set up the validator schema.
        self.job_validator = cerberus.Validator(SCHEMATA["job"]({
            "trigger_ids":
            list(self.TRIGGERS)
        }),
                                                allow_unknown=True)
        self.scheduler.start()

    def upsert_job(self, job_dict, **runtime_args):
        """Adds or updates a job using the provided user_input.

        If an id field is present in the dict, the job is updated, otherwise
        a new one is created.

        Args:
            job_dict (dict): user input for a job, as defined in `SCHEMATA`.
            **runtime_args (dict): additional runtime arguments for the
                crawler.

        Returns:
            apscheduler.job.Job: a new job Object.

        Raises:
            AssertionError: when the input does not validate against the
                job schema.
        """
        if not self.job_validator.validate(job_dict):
            raise AssertionError(str(self.job_validator.errors))

        doc = utility.SDA(job_dict)

        job = self.crawlers.get(doc["crawler.id"], None)
        # default to the SearchPlugin, and give the search name as argument.
        if job is None:
            inst = {
                "args": ("SearchPlugin", runtime_args),
                "kwargs": dict(search_id=doc["crawler.id"])
            }
        else:
            inst = {"args": (doc["crawler.id"], runtime_args), "kwargs": {}}
        trigger = self._make_trigger(doc["schedule"])

        if doc["id"]:
            self.scheduler.modify_job(doc["id"],
                                      jobstore="elastic",
                                      func=_run_plugin,
                                      name=doc["name.name"],
                                      **inst)
            new_job = self.scheduler.reschedule_job(doc["id"],
                                                    jobstore="elastic",
                                                    trigger=trigger)
        else:
            # use the crawler id as name, when the job is created.
            new_job = self.scheduler.add_job(_run_plugin,
                                             jobstore="elastic",
                                             trigger=trigger,
                                             name=doc["crawler.id"],
                                             **inst)

        return new_job

    def get_triggers(self):
        """Returns a list of triggers, that are predefined in the system.

        Returns:
            list: a list of dicts, holding id, name and options for each
                trigger.
        """
        return [{
            "id": v["id"],
            "name": v["name"],
            "options": v["options"]
        } for v in self.TRIGGERS.values()]

    def sync_jobs(self, joblist):
        """Synchronize the current jobs with a given list of jobs.

        This means, that all jobs not included in the list will be removed,
        existing ones will be updated and new ones will be added to the
        scheduler.

        Args:
            joblist (list): a list of jobs in the format of the schema.

        Returns:
            bool: whether this operation was successful or not.
        """
        logger.debug("Syncing job lists ...")
        current_jobs = self.get_jobs()
        jobs_to_keep = {j["id"] for j in joblist if j.get("id")}

        # remove old jobs
        for job in current_jobs:
            if job["id"] not in jobs_to_keep:
                self.scheduler.remove_job(job["id"], jobstore="elastic")

        # update and add jobs
        for job in joblist:
            self.upsert_job(job)

        return True

    def _make_trigger(self, trigger_doc):
        """Creates a CronTrigger from a given dictionary of user input.

        Only options that validate against the trigger's schema AND are
        flagged active contribute to the trigger arguments.
        """
        # we can assume, that an id for the trigger is given in the input.
        cur_trigger = self.TRIGGERS[trigger_doc["id"]]
        option_validator = cerberus.Validator(cur_trigger["schema"])

        args = [
            o["id"] for o in trigger_doc["options"]
            if option_validator(o) and o["active"]
        ]

        trigger_args = cur_trigger["trigger_args"](args)
        return CronTrigger(**trigger_args)

    def _serialize_trigger(self, trigger):
        """Serializes a trigger into a json dict, as defined in TRIGGERS.

        Returns an empty dict when the trigger matches none of the
        predefined entries.
        """
        # since we only have a defined set of triggers, the following is
        # possible.
        mapping = [(v["trigger_args"]([]).keys(), k)
                   for k, v in self.TRIGGERS.items()]

        # The explicitly-set cron fields identify the trigger; this does
        # not depend on the mapping entry, so compute it once outside
        # the loop.
        def_keys = [f.name for f in trigger.fields if not f.is_default]

        trigger_doc = None
        result = {}
        for keys, name in mapping:
            # all keys for the mapping need to be defined.
            if all(key in def_keys for key in keys):
                trigger_doc = self.TRIGGERS[name]
                break

        if not trigger_doc:
            return result

        result["name"] = trigger_doc["name"]
        result["id"] = trigger_doc["id"]
        args = set(trigger_doc["from_trigger"](trigger))
        # copy the list of options (otherwise this leads to nasty side effects)
        options = [dict(**item) for item in trigger_doc["options"]]
        for option in options:
            option["active"] = option["id"] in args
        result["options"] = options

        return result

    def get_jobs(self):
        """Returns a list of jobs that are scheduled in the system.

        Returns:
            list: a list of job-dicts, holding the id and the runtimes.
        """
        jobs = self.scheduler.get_jobs()
        joblist = []
        for job in jobs:
            joblist.append({
                "id": job.id,
                "name": {
                    "name": job.name
                },
                # NOTE(review): for search jobs created in upsert_job,
                # args[0] is "SearchPlugin" rather than the search id -
                # confirm this is intended.
                "crawler": {
                    "id": job.args[0]
                },
                "schedule": self._serialize_trigger(job.trigger),
                "next_run": {
                    "name": job.next_run_time
                }
            })
        logger.debug(f"Retrieved {len(joblist)} jobs from the jobstore.")
        return joblist

    def run_job(self, job_id):
        """Runs the job with the specified id immediately.

        Args:
            job_id: the id of the job that should be run.

        Returns:
            bool: whether running the job succeeded or not.
        """
        logger.debug(f"Running job '{job_id}' directly.")
        cur_job = self.scheduler.get_job(job_id, jobstore="elastic")
        if cur_job is None:
            return False

        # Run synchronously in the caller's thread, bypassing the
        # scheduler's executors.
        cur_job.func(*cur_job.args, **cur_job.kwargs)
        return True
Beispiel #21
0
    },
})


# Periodic job: poll the registry for a newer image every `pull_interval`
# seconds. `scheduler`, `pull_interval`, `interactions` and `image_name`
# are module-level names defined elsewhere in this file.
@scheduler.scheduled_job('interval',
                         id='pull_latest_image',
                         seconds=pull_interval,
                         timezone=utc)
def pull_latest_image():
    """Pull the configured image; when an update arrives, request a rebalance."""
    # Try to pull a new image
    if interactions.pull(image_name):
        # If we got a new image, trigger a rebalance
        balance_containers.trigger()


# Allow other code to force an immediate pull by moving this job's next
# run time to "now". NOTE(review): datetime.now() is naive while the job
# is scheduled with tz=utc - confirm the scheduler accepts naive times.
pull_latest_image.trigger = lambda: scheduler.modify_job(
    'pull_latest_image', next_run_time=datetime.now())


@scheduler.scheduled_job('interval',
                         id='balance_containers',
                         seconds=rebalance_interval,
                         timezone=utc)
def balance_containers():
    try:
        # Clean up before interacting with the pool:
        # - checks in containers that are checked out longer than the max lifetime
        # - stops containers that aren't running if their image is out of date
        # - stops containers from the pool not running selenium
        for container in interactions.containers():
            if container.checked_out:
                if ((datetime.utcnow() -
Beispiel #22
0
class Boss(object):
    """Watches a config directory and schedules one minion per JSON config.

    Each ``*.json`` file under ``dirpath`` describes a minion; Boss loads
    it, instantiates the configured minion class and registers its
    ``collect`` method with a background scheduler. A filesystem observer
    keeps the pool in sync with create/modify/delete events.
    """

    # Class-level defaults kept for backward compatibility; the mutable
    # worker registry is re-created per instance in __init__ so two Boss
    # instances never share state.
    workers = {}
    dirpath = '.'
    output = None

    def __init__(self, dirpath='.', output='output'):
        '''
        local path for load config
        '''
        logger.info("Initialing BOSS")
        # Fresh per-instance registry (the class attribute would be shared
        # by every instance).
        self.workers = {}
        if os.path.isdir(dirpath):
            self.dirpath = dirpath
            logger.info("loading job config folder: " + dirpath)
        else:
            logger.info(dirpath + " is invalid, use default path instead")
        self.output = output
        logger.info("Setup output folder: " + output)
        if not os.path.isdir(output):
            logger.info("target directory " + output +
                        " doesn't exist, creating..")
            os.makedirs(output)

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

        # Load from the validated path; the raw parameter may point at a
        # nonexistent directory when the fallback branch above was taken.
        self.load_dir(self.dirpath)

        event_handler = JsonHandler(patterns=["*.json"],
                                    ignore_directories=True)
        event_handler.set_handler(oncreated=self.load,
                                  onmodified=self.load,
                                  ondeleted=self.remove)
        observer = Observer()
        observer.schedule(event_handler, self.dirpath, recursive=True)
        observer.start()

    def load_dir(self, folder):
        '''
        Load every JSON config found directly inside the given folder.
        '''
        # next() is portable; the original iterator.next() call only
        # existed on Python 2.
        (dirpath, dirnames, filenames) = next(os.walk(folder))
        for fname in filenames:
            if 'json' in fname[-4:]:
                self.load(os.path.join(dirpath, fname))

    def load(self, fp):
        '''
        given a json file, load and create a task run regularly
        '''
        logger.info(fp + " was loaded!")
        with open(fp) as in_data:
            try:
                data = json.load(in_data)
                data['path'] = fp
            except ValueError as e:
                # str(e) is portable; Exception.message was removed in
                # Python 3.
                logger.warning(fp + " loaded failed: " + str(e))
                return None
            interval = 30
            if 'interval' in data:
                interval = int(data['interval'])
            if self.output:
                # TODO: test case for no 'output' key in data
                if 'output' not in data:
                    data['output'] = {}
                output = data['output']
                if 'dirpath' in output:
                    output['dirpath'] = os.path.join(self.output,
                                                     output['dirpath'])
                else:
                    output['dirpath'] = self.output
                if 'type' not in data:
                    logger.error("Missing type attribute in \
                                    your configruation file [%s]" % fp)
                    return None
            if fp in self.workers:  # existing minion found
                logger.info("Update exisitng minion [%s]" % fp)
                minion = self.workers[fp]
                minion.update(**data)
                # memo: the interval can't be modified on an existing job
                self.scheduler.modify_job(job_id=fp,
                                          func=minion.collect,
                                          name=minion.name + '_' +
                                          minion.serial)

            else:  # Create new
                logger.info("Create new minion [%s]" % fp)
                module_path = data['type'][:data['type'].rfind(".")]
                object_name = data['type'][data['type'].rfind(".") + 1:]
                try:
                    minion_module = getattr(
                        importlib.import_module(module_path), object_name)
                except Exception as e:
                    logger.exception(e)
                    return None

                minion = minion_module(**data)
                self.workers[fp] = minion
                self.scheduler.add_job(minion.collect,
                                       'interval',
                                       id=fp,
                                       name=minion.name + '_' + minion.serial,
                                       seconds=interval)
            return minion
        return None

    def list(self):
        '''
        Log every loaded config as "path=<config_path>,<worker>;".
        '''
        # items() is required: iterating the dict directly yields only
        # the keys, so the original (fp, worker) unpacking was broken.
        for fp, worker in self.workers.items():
            logger.info("path=" + fp + "," + str(worker) + ";")

    def remove(self, fp):
        '''
        given file path, stop running instance if possible
        '''
        if fp in self.workers:
            self.workers[fp].onstop()
            self.scheduler.remove_job(job_id=fp)
            del self.workers[fp]
            return True
        return False

    def remove_advanced(self):
        '''
        TODO:
        1. remove by start, end
        2. by directory(?)
        '''
        pass

    def unload_all(self):
        '''
        stop all running instances
        '''
        self.scheduler.shutdown()

    def pause(self, fp):
        '''
        simply stop running instance but not remove config
        TODO: should have timeout if stop failed
        '''
        self.scheduler.pause(job_id=fp)

    def resume(self, sn):
        # not sure we can do this
        pass

    def __del__(self):
        # Guarded: __del__ may run after a failed __init__ (no scheduler
        # attribute) or during interpreter teardown.
        try:
            self.unload_all()
        except Exception:
            pass

    def get_config(self):
        '''Return the current configuration (currently always empty).'''
        conf = {}
        return conf

    def _wake(self):
        '''
        For periodical minions, waking them according to timing
        '''
        pass
Beispiel #23
0
class TestSchedulerListener(unittest.TestCase):
    """Unit tests for SchedulerWatcher against a live BackgroundScheduler.

    Each test gets a fresh scheduler (see setUp) with an extra in-memory
    jobstore and a secondary executor, so jobstore/executor inspection
    always has two of each to find. Several tests rely on real time.sleep
    calls to let scheduled jobs fire - keep the timings intact.
    """

    def setUp(self):
        # One extra jobstore and executor beyond the defaults, so the
        # watcher has multiple entries to discover.
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore(MemoryJobStore(), alias='in_memory')
        self.scheduler.add_executor(ThreadPoolExecutor(1), alias='secondary_executor')

        self.scheduler.start()

    def tearDown(self):
        self.scheduler.shutdown()

    def test_watcher_injection(self):
        """Watcher registers itself as an all-events scheduler listener."""
        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual(watcher.scheduler, self.scheduler, 'Watcher should keep a reference to the scheduler')
        self.assertEqual(1, len(self.scheduler._listeners), 'Watcher should inject itself as a scheduler listener')

        self.assertEqual(
            self.scheduler._listeners[0][1], EVENT_ALL, 'Watcher should register iself to watch all events'
        )

    def test_scheduler_inspection(self):
        """Watcher snapshots state, timezone, class, stores, executors and jobs."""
        self.scheduler.add_job(lambda: 0, jobstore='in_memory', trigger='interval', minutes=60, id='test_job')

        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual('running', watcher.scheduler_info['state'], 'Watcher should inspect scheduler status')
        self.assertEqual(
            str(self.scheduler.timezone),
            watcher.scheduler_info['timezone'],
            'Watcher should inspect scheduler timezone'
        )
        self.assertEqual(
            'BackgroundScheduler', watcher.scheduler_info['class'], 'Watcher should inspect scheduler class'
        )

        self.assertEqual(2, len(watcher.jobstores), 'Watcher should inspect all scheduler jobstores')
        self.assertIn('in_memory', watcher.jobstores, 'Watcher should have inspected the in_memory jobstore')

        self.assertEqual(2, len(watcher.executors), 'Watcher should inspect all scheduler executors')
        self.assertIn('secondary_executor', watcher.executors, 'Watcher should have inspected the secondary_executor')

        self.assertEqual(1, len(watcher.jobs), 'Watcher should inspect all jobs in scheduler on init')
        self.assertIn('test_job', watcher.jobs, 'Watcher should index jobs by id')

    def test_job_properties_on_add(self):
        """A job added after watcher creation is fully described in properties."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(
            lambda x, y: x + y,
            id='added_job',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2}
        )

        self.assertIn('added_job', watcher.jobs)

        job_properties = watcher.jobs['added_job']['properties']

        self.assertEqual('added_job', job_properties['id'], 'Job properties should have the job id')
        self.assertEqual('Added job', job_properties['name'], 'Job properties should have the job name')
        self.assertIn('trigger', job_properties, 'Job properties should have a representation of the trigger')
        self.assertEqual('in_memory', job_properties['jobstore'], 'Job properties should have the jobstore name')
        self.assertEqual('default', job_properties['executor'], 'Job properties should have the executor name')
        self.assertIn('lambda', job_properties['func'], 'Job properties should have the function string repr')
        self.assertIn('func_ref', job_properties, 'Job properties should have the function reference')
        self.assertEqual('(1,)', job_properties['args'], 'Job properties should have the job arguments')
        self.assertEqual("{'y': 2}", job_properties['kwargs'], 'Job properties should have the job keyword arguments')
        self.assertIn('pending', job_properties, 'Job properties should have the job pending status')
        self.assertFalse(job_properties['pending'], 'Job status should not be pending')
        self.assertIn('coalesce', job_properties, 'Job properties should have the job coalesce configuration')
        self.assertIn('next_run_time', job_properties, 'Job properties should have the next run time calculated')
        self.assertIn('misfire_grace_time', job_properties, 'Job properties should have the misfire grace time')
        self.assertIn('max_instances', job_properties, 'Job properties should have the max instances configuration')

    def test_job_inspection_matches_job_added_event(self):
        """Init-time inspection and live add-events yield identical properties."""
        # We're going to add two jobs that should have the exact same properties, except for the id, in two different
        # stages of the usage: before the watcher is created and after we start watching for events.
        def job_function(x, y):
            return x + y
        next_run_time = datetime.now() + timedelta(hours=1)

        # Job that is added before the user calls us.
        self.scheduler.add_job(
            job_function,
            id='job_1',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2},
            next_run_time=next_run_time
        )

        watcher = SchedulerWatcher(self.scheduler)

        # Job that gets added after we start watching.
        self.scheduler.add_job(
            job_function,
            id='job_2',
            name='Added job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2},
            next_run_time=next_run_time
        )

        self.assertEqual(2, len(watcher.jobs))

        job_1 = watcher.jobs['job_1']
        job_2 = watcher.jobs['job_2']

        for property_name in job_1['properties'].keys():
            # All properties, except the id, should match.
            if property_name == 'id':
                continue
            self.assertEqual(job_1['properties'][property_name], job_2['properties'][property_name])

    def test_all_events_have_a_processing_method(self):
        """Every apscheduler event name maps to a method on the watcher class."""
        for event_name in list(SchedulerWatcher.apscheduler_events.values()):
            self.assertIn(event_name, dir(SchedulerWatcher))

    def test_job_execution_monitoring(self):
        """Submission and execution events accumulate in the job's event list."""
        watcher = SchedulerWatcher(self.scheduler)

        # Job runs immediately, takes 0.02s, repeats every 0.2s.
        self.scheduler.add_job(
            lambda: time.sleep(0.02),
            id='waiting_job',
            name='Waiting job',
            jobstore='in_memory',
            trigger='interval',
            seconds=0.2,
            next_run_time=datetime.now()
        )

        job_events = watcher.jobs['waiting_job']['events']

        self.assertEqual(1, len(job_events))
        self.assertEqual('job_added', job_events[0]['event_name'])
        time.sleep(0.05)
        self.assertEqual(3, len(job_events), 'Job execution needs to be tracked in job events')
        self.assertEqual(
            'job_submitted',
            job_events[1]['event_name'],
            'Job submision needs to be tracked in job events'
        )
        self.assertEqual('job_executed', job_events[2]['event_name'], 'Job execution needs to be tracked in job events')

        time.sleep(0.2)

        self.assertEqual(5, len(job_events), 'Subsequent executions get tracked')

    def test_job_failure_monitoring(self):
        """A raising job produces a job_error event."""
        watcher = SchedulerWatcher(self.scheduler)

        def fail():
            time.sleep(0.02)
            return 0 / 0

        self.scheduler.add_job(
            fail,
            id='failing_job',
            name='Failing job',
            jobstore='in_memory',
            trigger='interval',
            next_run_time=datetime.now(),
            minutes=60
        )

        failing_job_events = watcher.jobs['failing_job']['events']

        time.sleep(0.05)
        self.assertEqual(3, len(failing_job_events))
        self.assertEqual('job_error', failing_job_events[2]['event_name'])

    def test_scheduler_summary(self):
        """scheduler_summary reflects current state and never drops known jobs."""
        watcher = SchedulerWatcher(self.scheduler)

        summary = watcher.scheduler_summary()

        self.assertEqual(sorted(['scheduler', 'jobs', 'executors', 'jobstores']), sorted(summary.keys()))

        self.assertEqual('running', summary['scheduler']['state'], 'scheduler_summary should have the scheduler status')
        self.assertEqual(2, len(summary['executors']), 'scheduler_summaru should have the two added executors')
        self.assertEqual(2, len(summary['jobstores']), 'scheduler_summary should have the two executors')
        self.assertEqual(0, len(summary['jobs']), 'scheduler_summary should have no jobs')

        self.scheduler.add_job(lambda: 0, id='job_1')

        summary = watcher.scheduler_summary()

        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have the added jobs in it')

        self.scheduler.remove_job('job_1')

        summary = watcher.scheduler_summary()
        self.assertIn('job_1', summary['jobs'], 'scheduler_summary should have all jobs in it, even if job was removed')

    def test_removed_jobs_are_only_flagged_as_removed(self):
        """Removal only stamps removed_time; the job entry is retained."""
        self.scheduler.add_job(lambda: 0, id='a_job')

        watcher = SchedulerWatcher(self.scheduler)

        self.assertIn('a_job', watcher.jobs)
        self.assertIsNone(watcher.jobs['a_job']['removed_time'])

        self.scheduler.remove_job('a_job')

        self.assertIn('a_job', watcher.jobs, 'removed jobs should be still tracked in the scheduler watcher')
        self.assertIsNotNone(watcher.jobs['a_job']['removed_time'], 'removed_time should be set')

    def test_modified_job_properties_are_tracked(self):
        """modify_job updates tracked properties and bumps modified_time."""
        self.scheduler.add_job(
            lambda x, y: x + y,
            id='a_job',
            name='A job',
            jobstore='in_memory',
            trigger='interval',
            minutes=60,
            args=(1,),
            kwargs={'y': 2}
        )

        watcher = SchedulerWatcher(self.scheduler)

        self.assertEqual(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])

        next_run_time = watcher.jobs['a_job']['properties']['next_run_time'][0]

        self.scheduler.modify_job('a_job', name='A modified job', next_run_time=datetime.now() + timedelta(days=1))

        self.assertGreater(watcher.jobs['a_job']['modified_time'], watcher.jobs['a_job']['added_time'])
        self.assertEqual('A modified job', watcher.jobs['a_job']['properties']['name'])
        self.assertGreater(watcher.jobs['a_job']['properties']['next_run_time'][0], next_run_time)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_removing_a_jobstore_removes_all_jobs(self, mock_notify_jobstore_event):
        """Dropping a jobstore flags its jobs as removed and notifies."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='in_memory', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)

        self.assertEqual(2, len(watcher.jobs))
        self.assertIsNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be None')
        self.assertEqual('in_memory', watcher.jobs['job_1']['properties']['jobstore'])

        self.scheduler.remove_jobstore('in_memory')

        mock_notify_jobstore_event.assert_called()

        self.assertEqual(2, len(watcher.jobs), 'The amount of jobs after removing a jobstore should not change')
        self.assertIsNotNone(watcher.jobs['job_1']['removed_time'], 'job_1 removed time should be set')
        self.assertIsNotNone(watcher.jobs['job_2']['removed_time'], 'job_2 removed time should be set')

    @patch('apschedulerui.watcher.SchedulerWatcher._repr_job')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    @patch('apschedulerui.watcher.SchedulerWatcher.notify_jobstore_event')
    def test_adding_a_jobstore_adds_all_jobs_in_it(self, mock_notify_jobstore_event, mock_notify_job_event, _):
        """Attaching a pre-populated jobstore imports its jobs and notifies."""
        watcher = SchedulerWatcher(self.scheduler)

        jobstore = MemoryJobStore()

        jobstore.add_job(Job(scheduler=self.scheduler, id='job_1', next_run_time=datetime.now() + timedelta(days=1)))
        jobstore.add_job(Job(scheduler=self.scheduler, id='job_2', next_run_time=datetime.now() + timedelta(days=2)))

        self.assertEqual(0, len(watcher.jobs))

        self.scheduler.add_jobstore(jobstore, alias='in_memory_2')

        self.assertIn('in_memory_2', watcher.jobstores, 'Watcher should have the new jobstore tracked')
        self.assertEqual(2, len(watcher.jobs), 'Watcher should add all jobs in the newly added jobstore')
        self.assertTrue(all([job_id in watcher.jobs for job_id in ['job_1', 'job_2']]))
        self.assertEqual(2, mock_notify_job_event.call_count)
        mock_notify_jobstore_event.assert_called_once()

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_job_event')
    def test_removing_all_jobs_flags_all_as_removed(self, mock_notify_job_event):
        """remove_all_jobs keeps entries but emits a removal event per job."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_job(lambda: 0, id='job_1', jobstore='default', trigger='interval', minutes=60)
        self.scheduler.add_job(lambda: 0, id='job_2', jobstore='in_memory', trigger='interval', minutes=60)

        self.assertEqual(2, len(watcher.jobs))
        self.assertEqual(2, mock_notify_job_event.call_count)

        mock_notify_job_event.reset_mock()

        self.scheduler.remove_all_jobs()

        self.assertEqual(2, len(watcher.jobs), 'job count should not change after removing all jobs')
        self.assertEqual(2, mock_notify_job_event.call_count)

    @patch('apschedulerui.watcher.SchedulerWatcher.notify_executor_event')
    def test_adding_and_removing_executors(self, mock_notify_executor_event):
        """Executor add/remove is mirrored in watcher.executors with events."""
        watcher = SchedulerWatcher(self.scheduler)

        self.scheduler.add_executor(ThreadPoolExecutor(), alias='new_executor')

        self.assertIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()

        mock_notify_executor_event.reset_mock()
        self.scheduler.remove_executor('new_executor')

        self.assertNotIn('new_executor', watcher.executors)
        mock_notify_executor_event.assert_called()

    def test_job_event_history_is_limited(self):
        """Per-job event history is capped at max_events_per_job."""
        watcher = SchedulerWatcher(self.scheduler, max_events_per_job=4)

        self.scheduler.add_job(lambda: 0, trigger='interval', seconds=0.01, id='recurrent_job')

        time.sleep(0.1)

        # recurrent_job should have been executed ~10 times now, generating ~20 events (submission + execution).
        self.assertEqual(
            watcher.max_events_per_job,
            len(watcher.jobs['recurrent_job']['events']),
            'job event history should be limited'
        )
Beispiel #24
0
class ApplicationController(object):
    """Tk front-end plus a background scheduler that alt-tabs back to the
    user's window whenever one of the configured processes is running.

    Configuration is persisted as a YAML list of single-entry
    ``{procname: interval_minutes}`` dicts stored at a per-OS path.
    """

    def __init__(self):
        self.thread_scheduler = BackgroundScheduler()

        # Load or set up the config.
        log = logging.getLogger(__name__)
        self.yaml_path = self._find_config_home()
        try:
            # `with` closes the handle (the old code leaked an open file).
            with open(self.yaml_path) as config_file:
                self.yaml_config = yaml.safe_load(config_file)
            if self.yaml_config is None:
                log.warning("YAML found but empty, remaking.")
                self.yaml_config = []
            else:
                log.debug("YAML found and loaded.")
        except FileNotFoundError:
            log.warning("No YAML found. Making a new one.")
            open(self.yaml_path, 'a').close()
            self.yaml_config = []
        except yaml.YAMLError:
            log.warning("Something's wrong with the YAML... remaking.")
            # BUG FIX: yaml_config was previously left unset on this path,
            # making the loop below raise AttributeError.
            self.yaml_config = []
        for job in self.yaml_config:
            procname = list(job.keys())[0]
            interval = list(job.values())[0]
            # BUG FIX: bind procname as a default argument.  A plain closure
            # over the loop variable late-binds, so every scheduled job would
            # have checked only the *last* configured process.
            self.thread_scheduler.add_job(
                lambda name=procname: self._check_for_processes(name),
                id=procname,
                trigger=IntervalTrigger(minutes=interval))
        log.debug("Initialized jobs.")

    def _find_config_home(self):
        """Return the per-OS config file path, or None on an unknown OS."""
        platform = sys.platform
        if 'linux' in platform:
            return os.path.expanduser('~/.local/share/tabber.cfg')
        elif 'win32' in platform:
            return os.path.expanduser('~/AppData/Roaming/tabber.cfg')
        elif 'darwin' in platform:
            # BUG FIX: macOS reports sys.platform == 'darwin'; the old check
            # for 'mac' could never match.
            return os.path.expanduser(
                '~/Library/Application Support/tabber.cfg')
        else:
            print("Unknown OS. Only Windows, Mac, and Linux are supported.")
            return None

    def _save_config(self):
        """Dump the in-memory config back to the YAML file."""
        with open(self.yaml_path, 'w') as yp:
            yaml.dump(self.yaml_config, yp)
        print("Saved config.")

    def _fill_process_listbox(self):
        """Insert one formatted '<procname> <interval>' row per config entry."""
        for i, proc in enumerate(self.yaml_config):
            self.current_processes.insert(
                i, "{:<15} {:<3}".format(
                    list(proc.keys())[0],
                    list(proc.values())[0]))

    def _update_processes(self):
        """Refresh the listbox so it mirrors the current config."""
        self.current_processes.delete(0, 'end')
        self._fill_process_listbox()
        self.current_processes.grid(row=3, column=0)

    def _add_proc(self, procname, interval):
        """Add a watched process, or update its interval if already watched.

        :param procname: process name; also used as the scheduler job id
        :param interval: check interval in minutes
        """
        # IDs for the current jobs are equal to the procname
        if procname in [j.id for j in self.thread_scheduler.get_jobs()]:
            # It already exists: just change the trigger interval.
            self.thread_scheduler.modify_job(
                job_id=procname, trigger=IntervalTrigger(minutes=interval))
            for i, proc in enumerate(self.yaml_config):
                # BUG FIX: the old code compared the whole dict to the name
                # (`proc == procname`), which never matched.
                if list(proc.keys())[0] == procname:
                    self.yaml_config[i][procname] = interval
            # BUG FIX: persist interval changes too, not only additions.
            self._save_config()
        else:
            self.thread_scheduler.add_job(
                lambda: self._check_for_processes(procname),
                id=procname,
                trigger=IntervalTrigger(minutes=interval))
            self.yaml_config.append({procname: interval})
            self._save_config()

        self._update_processes()

    def _delete_proc_cursor(self, selected):
        """Remove the selected listbox entry from config, scheduler and UI.

        :param selected: listbox curselection() tuple; only index 0 is used
        """
        entry = self.current_processes.get(selected[0])
        procname = entry[:-3].strip()
        interval = int(entry[-3:])
        # BUG FIX: rebuild the list instead of deleting from it while
        # iterating (skips entries / misbehaves on duplicates).
        self.yaml_config = [
            x for x in self.yaml_config
            if not (list(x.keys())[0] == procname
                    and list(x.values())[0] == interval)
        ]
        self._save_config()

        self.thread_scheduler.remove_job(procname)
        self.current_processes.delete(selected[0])
        self._update_processes()

    def _check_for_processes(self, procname):
        """Alt-tab once if any running process name occurs in `procname`.

        NOTE(review): the substring test (`proc.name() in procname`) is kept
        from the original -- confirm an exact-name match was not intended.
        """
        do_alt_tab = False
        for proc in psutil.process_iter():
            if proc.name() in procname:
                do_alt_tab = True
        if do_alt_tab:
            self.alt_tab()

    def alt_tab(self):
        """Send the OS-appropriate window-switch hotkey."""
        # BUG FIX: macOS is 'darwin'; the old 'mac' test never matched and
        # always sent alt+tab instead of command+tab.
        if 'darwin' in sys.platform:
            pyautogui.hotkey('command', 'tab')
        else:
            pyautogui.hotkey('alt', 'tab')

    def _init_gui(self):
        """Build the Tk main window and enter the event loop (blocks)."""
        oriR = 1
        oriC = 0

        # Create the root window
        root = tk.Tk()
        root.geometry("500x400")
        root.title("Pro Gamer Switcher")

        menu = tk.Menu(root)
        root.config(menu=menu)
        filemenu = tk.Menu(menu)
        menu.add_cascade(label='File', menu=filemenu)
        filemenu.add_command(label='New')
        filemenu.add_command(label='Open...')
        filemenu.add_separator()
        filemenu.add_command(label='Exit', command=root.quit)
        helpmenu = tk.Menu(menu)
        menu.add_cascade(label='Help', menu=helpmenu)
        helpmenu.add_command(label='About')

        self.current_processes = tk.Listbox(root)
        self._fill_process_listbox()
        self.current_processes.grid(row=oriR + 2, column=oriC)

        name_desc = tk.Label(root, text="Process:")
        name_desc.grid(row=oriR + 1, column=oriC)
        name_entry = tk.Entry(root, width=10)
        name_entry.grid(row=oriR + 1, column=oriC + 1)
        interval_desc = tk.Label(root, text="Interval (min):")
        interval_desc.grid(row=oriR + 1, column=oriC + 5)
        interval_entry = tk.Entry(root, width=3)
        interval_entry.grid(row=oriR + 1, column=oriC + 6)

        add_btn = tk.Button(root,
                            text="Add",
                            command=lambda: self._add_proc(
                                name_entry.get(), int(interval_entry.get())))
        add_btn.grid(row=oriR + 1, column=oriC + 7)
        del_btn = tk.Button(root,
                            text="Delete",
                            command=lambda: self._delete_proc_cursor(
                                self.current_processes.curselection()))
        del_btn.grid(row=oriR + 2, column=oriC + 7)

        root.mainloop()
Beispiel #25
0
class Scheduler:
    """Owns the APScheduler BackgroundScheduler and registers all tasks.

    Tasks whose schedule comes from user settings are (re-)registered through
    update_configurable_tasks(); cache cleanup is registered once at startup.
    A CronTrigger with year='2100' is the convention for "registered but
    never runs" so the task still appears in the UI task list.
    """

    def __init__(self):
        # Job ids of tasks currently executing, exposed to the frontend.
        self.__running_tasks = []

        self.aps_scheduler = BackgroundScheduler()

        # Task listeners: mirror submit/finish scheduler events into
        # __running_tasks and push a notification on every transition.
        def task_listener_add(event):
            if event.job_id not in self.__running_tasks:
                self.__running_tasks.append(event.job_id)
                event_stream(type='task', task=event.job_id)

        def task_listener_remove(event):
            if event.job_id in self.__running_tasks:
                self.__running_tasks.remove(event.job_id)
                event_stream(type='task', task=event.job_id)

        self.aps_scheduler.add_listener(task_listener_add, EVENT_JOB_SUBMITTED)
        self.aps_scheduler.add_listener(task_listener_remove,
                                        EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        # configure all tasks
        self.__cache_cleanup_task()
        self.update_configurable_tasks()

        self.aps_scheduler.start()

    def update_configurable_tasks(self):
        """(Re-)register every task whose schedule depends on user settings."""
        self.__sonarr_update_task()
        self.__radarr_update_task()
        self.__sonarr_full_update_task()
        self.__radarr_full_update_task()
        self.__update_bazarr_task()
        self.__search_wanted_subtitles_task()
        self.__upgrade_subtitles_task()
        self.__randomize_interval_task()
        if args.no_tasks:
            # Debug mode: park every job so nothing runs automatically.
            self.__no_task()

    def add_job(self,
                job,
                name=None,
                max_instances=1,
                coalesce=True,
                args=None):
        """Schedule `job` for immediate one-shot execution.

        :param job: callable to run
        :param name: used both as display name and as job id
        :param max_instances: max concurrently running instances
        :param coalesce: collapse several missed runs into one
        :param args: positional arguments passed to `job`
        """
        self.aps_scheduler.add_job(job,
                                   DateTrigger(run_date=datetime.now()),
                                   name=name,
                                   id=name,
                                   max_instances=max_instances,
                                   coalesce=coalesce,
                                   args=args)

    def execute_job_now(self, taskid):
        """Run an existing job immediately by rescheduling its next run."""
        self.aps_scheduler.modify_job(taskid, next_run_time=datetime.now())

    def get_running_tasks(self):
        """Return the list of job ids currently executing."""
        return self.__running_tasks

    def get_task_list(self):
        """Return a UI-friendly dict per registered job (name, schedule,
        next run, id, running flag)."""

        def get_time_from_interval(td_object):
            # Humanize a timedelta, e.g. "1 day, 2 hours".
            seconds = int(td_object.total_seconds())
            periods = [('year', 60 * 60 * 24 * 365),
                       ('month', 60 * 60 * 24 * 30), ('day', 60 * 60 * 24),
                       ('hour', 60 * 60), ('minute', 60), ('second', 1)]

            strings = []
            for period_name, period_seconds in periods:
                # BUG FIX: use >= so exact multiples collapse into the larger
                # unit ("1 day" instead of "24 hours").
                if seconds >= period_seconds:
                    period_value, seconds = divmod(seconds, period_seconds)
                    has_s = 's' if period_value > 1 else ''
                    strings.append("%s %s%s" %
                                   (period_value, period_name, has_s))

            return ", ".join(strings)

        def get_time_from_cron(cron):
            # `cron` is the CronTrigger fields tuple; year 2100 = disabled.
            year = str(cron[0])
            if year == "2100":
                return "Never"

            day = str(cron[4])
            hour = str(cron[5])

            if day == "*":
                text = "everyday"
            else:
                text = "every " + day_name[int(day)]

            if hour != "*":
                text += " at " + hour + ":00"

            return text

        task_list = []
        for job in self.aps_scheduler.get_jobs():
            next_run = 'Never'
            if job.next_run_time:
                next_run = pretty.date(job.next_run_time.replace(tzinfo=None))
            if isinstance(job.trigger, CronTrigger):
                if job.next_run_time and str(
                        job.trigger.__getstate__()['fields'][0]) != "2100":
                    next_run = pretty.date(
                        job.next_run_time.replace(tzinfo=None))

            running = job.id in self.__running_tasks

            if isinstance(job.trigger, IntervalTrigger):
                interval = "every " + get_time_from_interval(
                    job.trigger.__getstate__()['interval'])
                task_list.append({
                    'name': job.name,
                    'interval': interval,
                    'next_run_in': next_run,
                    'next_run_time': next_run,
                    'job_id': job.id,
                    'job_running': running
                })
            elif isinstance(job.trigger, CronTrigger):
                task_list.append({
                    'name': job.name,
                    'interval': get_time_from_cron(job.trigger.fields),
                    'next_run_in': next_run,
                    'next_run_time': next_run,
                    'job_id': job.id,
                    'job_running': running
                })

        return task_list

    def __sonarr_update_task(self):
        """Register periodic Sonarr series/episode sync jobs (if enabled)."""
        if settings.general.getboolean('use_sonarr'):
            self.aps_scheduler.add_job(
                update_series,
                IntervalTrigger(minutes=int(settings.sonarr.series_sync)),
                max_instances=1,
                coalesce=True,
                misfire_grace_time=15,
                id='update_series',
                name='Update Series list from Sonarr',
                replace_existing=True)
            self.aps_scheduler.add_job(
                sync_episodes,
                IntervalTrigger(minutes=int(settings.sonarr.episodes_sync)),
                max_instances=1,
                coalesce=True,
                misfire_grace_time=15,
                id='sync_episodes',
                name='Sync episodes with Sonarr',
                replace_existing=True)

    def __radarr_update_task(self):
        """Register the periodic Radarr movie sync job (if enabled)."""
        if settings.general.getboolean('use_radarr'):
            self.aps_scheduler.add_job(
                update_movies,
                IntervalTrigger(minutes=int(settings.radarr.movies_sync)),
                max_instances=1,
                coalesce=True,
                misfire_grace_time=15,
                id='update_movies',
                name='Update Movie list from Radarr',
                replace_existing=True)

    def __cache_cleanup_task(self):
        """Register the daily cache maintenance job (not user-configurable)."""
        self.aps_scheduler.add_job(cache_maintenance,
                                   IntervalTrigger(hours=24),
                                   max_instances=1,
                                   coalesce=True,
                                   misfire_grace_time=15,
                                   id='cache_cleanup',
                                   name='Cache maintenance')

    def __sonarr_full_update_task(self):
        """Register the full episode-subtitle disk scan per settings
        (Daily / Weekly / Manually, where Manually = never via year=2100)."""
        if settings.general.getboolean('use_sonarr'):
            full_update = settings.sonarr.full_update
            if full_update == "Daily":
                self.aps_scheduler.add_job(
                    update_all_episodes,
                    CronTrigger(hour=settings.sonarr.full_update_hour),
                    max_instances=1,
                    coalesce=True,
                    misfire_grace_time=15,
                    id='update_all_episodes',
                    name='Update all Episode Subtitles from disk',
                    replace_existing=True)
            elif full_update == "Weekly":
                self.aps_scheduler.add_job(
                    update_all_episodes,
                    CronTrigger(day_of_week=settings.sonarr.full_update_day,
                                hour=settings.sonarr.full_update_hour),
                    max_instances=1,
                    coalesce=True,
                    misfire_grace_time=15,
                    id='update_all_episodes',
                    name='Update all Episode Subtitles from disk',
                    replace_existing=True)
            elif full_update == "Manually":
                self.aps_scheduler.add_job(
                    update_all_episodes,
                    CronTrigger(year='2100'),
                    max_instances=1,
                    coalesce=True,
                    misfire_grace_time=15,
                    id='update_all_episodes',
                    name='Update all Episode Subtitles from disk',
                    replace_existing=True)

    def __radarr_full_update_task(self):
        """Register the full movie-subtitle disk scan per settings
        (Daily / Weekly / Manually, where Manually = never via year=2100)."""
        if settings.general.getboolean('use_radarr'):
            full_update = settings.radarr.full_update
            if full_update == "Daily":
                self.aps_scheduler.add_job(
                    update_all_movies,
                    CronTrigger(hour=settings.radarr.full_update_hour),
                    max_instances=1,
                    coalesce=True,
                    misfire_grace_time=15,
                    id='update_all_movies',
                    name='Update all Movie Subtitles from disk',
                    replace_existing=True)
            elif full_update == "Weekly":
                self.aps_scheduler.add_job(
                    update_all_movies,
                    CronTrigger(day_of_week=settings.radarr.full_update_day,
                                hour=settings.radarr.full_update_hour),
                    max_instances=1,
                    coalesce=True,
                    misfire_grace_time=15,
                    id='update_all_movies',
                    name='Update all Movie Subtitles from disk',
                    replace_existing=True)
            elif full_update == "Manually":
                self.aps_scheduler.add_job(
                    update_all_movies,
                    CronTrigger(year='2100'),
                    max_instances=1,
                    coalesce=True,
                    misfire_grace_time=15,
                    id='update_all_movies',
                    name='Update all Movie Subtitles from disk',
                    replace_existing=True)

    def __update_bazarr_task(self):
        """Register either the self-update job (auto_update on) or a
        disabled placeholder plus the release-info check job."""
        if not args.no_update:
            task_name = 'Update Bazarr from source on Github'
            if args.release_update:
                task_name = 'Update Bazarr from release on Github'

            if settings.general.getboolean('auto_update'):
                self.aps_scheduler.add_job(check_and_apply_update,
                                           IntervalTrigger(hours=6),
                                           max_instances=1,
                                           coalesce=True,
                                           misfire_grace_time=15,
                                           id='update_bazarr',
                                           name=task_name,
                                           replace_existing=True)
            else:
                # year=2100 disables the job; it is registered only so it
                # still appears in the task list.  BUG FIX: a stray `hour=4`
                # keyword was previously passed to add_job (not to the
                # trigger); APScheduler silently ignored it, so it has been
                # dropped rather than folded into the (never-firing) trigger.
                self.aps_scheduler.add_job(check_and_apply_update,
                                           CronTrigger(year='2100'),
                                           id='update_bazarr',
                                           name=task_name,
                                           replace_existing=True)
                self.aps_scheduler.add_job(check_releases,
                                           IntervalTrigger(hours=6),
                                           max_instances=1,
                                           coalesce=True,
                                           misfire_grace_time=15,
                                           id='update_release',
                                           name='Update Release Info',
                                           replace_existing=True)

        else:
            self.aps_scheduler.add_job(check_releases,
                                       IntervalTrigger(hours=6),
                                       max_instances=1,
                                       coalesce=True,
                                       misfire_grace_time=15,
                                       id='update_release',
                                       name='Update Release Info',
                                       replace_existing=True)

    def __search_wanted_subtitles_task(self):
        """Register the wanted-subtitle search jobs for Sonarr/Radarr."""
        if settings.general.getboolean('use_sonarr'):
            self.aps_scheduler.add_job(
                wanted_search_missing_subtitles_series,
                IntervalTrigger(
                    hours=int(settings.general.wanted_search_frequency)),
                max_instances=1,
                coalesce=True,
                misfire_grace_time=15,
                id='wanted_search_missing_subtitles_series',
                name='Search for wanted Series Subtitles',
                replace_existing=True)
        if settings.general.getboolean('use_radarr'):
            self.aps_scheduler.add_job(
                wanted_search_missing_subtitles_movies,
                IntervalTrigger(
                    hours=int(settings.general.wanted_search_frequency_movie)),
                max_instances=1,
                coalesce=True,
                misfire_grace_time=15,
                id='wanted_search_missing_subtitles_movies',
                name='Search for wanted Movies Subtitles',
                replace_existing=True)

    def __upgrade_subtitles_task(self):
        """Register the subtitle-upgrade job when upgrading is enabled and
        at least one media source is in use."""
        if settings.general.getboolean('upgrade_subs') and \
                (settings.general.getboolean('use_sonarr') or settings.general.getboolean('use_radarr')):
            self.aps_scheduler.add_job(
                upgrade_subtitles,
                IntervalTrigger(hours=int(settings.general.upgrade_frequency)),
                max_instances=1,
                coalesce=True,
                misfire_grace_time=15,
                id='upgrade_subtitles',
                name='Upgrade previously downloaded Subtitles',
                replace_existing=True)

    def __randomize_interval_task(self):
        """Spread interval jobs' first runs over 75-100% of their interval so
        all tasks don't fire at once right after startup."""
        for job in self.aps_scheduler.get_jobs():
            if isinstance(job.trigger, IntervalTrigger):
                interval_seconds = int(job.trigger.interval.total_seconds())
                # BUG FIX: randrange requires integer bounds (floats raise
                # on Python >= 3.10); the old code passed floats.
                self.aps_scheduler.modify_job(
                    job.id,
                    next_run_time=datetime.now() + timedelta(
                        seconds=randrange(int(interval_seconds * 0.75),
                                          interval_seconds)))

    def __no_task(self):
        """Park every job (next_run_time=None) when --no-tasks is set."""
        for job in self.aps_scheduler.get_jobs():
            self.aps_scheduler.modify_job(job.id, next_run_time=None)
class ScheduleDisplay(Surface):
    def __init__(self, width, height):
        """Build the schedule display surface and register daily refresh jobs.

        Loads fonts/images from the Assets folder, sizes all text and
        buffers proportionally to width/height, schedules the nightly data
        refresh pipeline (compile subjects -> update JSON -> load classes ->
        load time slots), then performs the initial load synchronously.

        :param width: surface width in pixels
        :param height: surface height in pixels
        """
        Surface.__init__(self, (width, height))
        self.width = width
        self.height = height
        # You need this if you intend to display any text
        font.init()
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        # These are the folders we will use for images and fonts
        self.dir_path = path.dirname(path.realpath(__file__))
        self.assets_path = path.join(self.dir_path, 'Assets')
        # notice the 'Fonts' folder is located in the 'Assets'
        self.fonts_path = path.join(self.assets_path, 'Fonts')
        # Change this when done testing ----
        # Information specific to semester, building, and campus.
        # These are most often used by the BCScheduleCreator module to fill in web forms.
        # The same forms you would see if you went to http://searchclasses.butte.edu/
        self.location = 'Main Campus'
        self.building = 'MC'
        self.term = ''
        # The relative location of the files containing the raw class data.
        self.scheduleFile = 'scheduleData.json'
        # This file is a compilation of Subjects in the 'MC' building. The greatly shortens
        # the amount of time required to update the json file.
        self.compiledSubjectsFile = 'subjectsIn_MC.txt'
        self.backgroundImageFile = 'MapBG.png'
        self.backgroundImage = image.load(
            path.join(self.assets_path, self.backgroundImageFile))
        self.backgroundImage = transform.smoothscale(self.backgroundImage,
                                                     (self.width, self.height))
        self.backgroundImage.convert_alpha()
        # This defines the time after a class starts that it will still be displayed.
        # Ex: TimeSlot ends at 10:00, it will be visable until 10:00 + however miniutes.
        self.timeSlotDisplayBuffer = 8
        # Holds All Surfaces that are displayed, and normally the very next one to be displayed after the
        # next update.
        self.classesSurfacesAndTimes = []
        # Flag tells object to no longer update screen because nothing has changed.
        self.noMoreClassesFlag = False
        # Called by LoadTodaysClasses()
        self.todaysClasses = []
        # Called by Load TodaysTimeSlots()
        self.todaysTimeSlots = []
        # Font specs are (ttf path, point size) tuples, unpacked into
        # font.Font(*spec) at render time; sizes scale with surface height.
        self.timeSlotFont = (path.join(self.fonts_path,
                                       'OpenSans-CondBold.ttf'),
                             int(height * .03425))
        # Max number of classes that should be displayed at once.
        self.scheduleDisplay_numberOfClasses = 20
        # Shot letter-number phrase before the class title.
        self.classSurface_classCodeFont = (path.join(self.fonts_path,
                                                     'OpenSans-Bold.ttf'),
                                           int(height * .02877))
        # Distance between classCode and roomNumber in pixels.
        self.classSurface_classCodeLeftBuffer = int(width * .01580)
        self.classSurface_classTitleFont = (path.join(self.fonts_path,
                                                      'OpenSans-Regular.ttf'),
                                            int(height * .02740))
        # Distance between classTitle and classCode in pixels.
        self.classSurface_classTitleLeftBuffer = int(width * .18167)
        self.classSurface_classInstructorFont = (path.join(
            self.fonts_path, 'OpenSans-Regular.ttf'), int(height * .01370))
        # These can be removed, then we can just put (int(width/height * 1111)) where ever they end up in the code.
        self.classSurface_widthBuffer = int(width * .0158)
        self.classSurface_heightBuffer = int(height * .00411)
        self.classSurface_bgColors = ((242, 242, 242), (255, 255, 255))
        self.classSurface_roomNumberFont = (path.join(self.fonts_path,
                                                      'OpenSans-CondBold.ttf'),
                                            int(height * .04110))
        self.classSurface_floorSurface_widthRatio = .15
        self.classSurface_floorSurface_buffer = (0, 0)
        # Nightly pipeline (weekdays only): recompile the subjects list at
        # 01:00, refresh the JSON at 02:00, then reload today's data.
        self.scheduler.add_job(CompileSubjectsInBuilding,
                               'cron',
                               id='CompileSubjects01',
                               day_of_week='mon-fri',
                               hour=1,
                               args=[
                                   self.building, self.term, self.location,
                                   path.join(self.dir_path,
                                             self.compiledSubjectsFile)
                               ])
        self.scheduler.add_job(self.UpdateJson,
                               'cron',
                               id='UpdateJson01',
                               day_of_week='mon-fri',
                               hour=2)
        self.scheduler.add_job(self.LoadTodaysClasses,
                               'cron',
                               id='LoadTodaysClasses01',
                               day_of_week='mon-fri',
                               hour=2,
                               minute=30)
        self.scheduler.add_job(self.LoadTodaysTimeSlots,
                               'cron',
                               id='LoadTodaysTimeSlots01',
                               day_of_week='mon-fri',
                               hour=2,
                               minute=35)
        # Initial synchronous load so the display has data before the first
        # scheduled refresh runs.
        self.InitializeJsonData()
        self.LoadTodaysClasses()
        self.LoadTodaysTimeSlots()

    def Update(self):
        """Advance the display by one tick.

        Drops the front time slot once its (buffered) time has passed,
        re-renders the remaining slot surfaces stacked from the top, and
        back-fills further slots while they fit vertically.  When no slots
        are queued, either renders today's remaining classes or a one-time
        "no classes" screen.

        :return: True when the surface changed and needs re-blitting, False
            otherwise.  NOTE(review): the early-exit path below returns
            `self` instead of a boolean (still truthy) -- confirm callers
            only test truthiness.
        """
        needsUpdate = False
        maxHeight = self.height
        currentTime = datetime.now()
        if self.classesSurfacesAndTimes:
            latestTimeSlot = self.classesSurfacesAndTimes[0][1]
            # Front entry expired: drop it and redraw everything that's left.
            if latestTimeSlot < currentTime:
                self.classesSurfacesAndTimes.pop(0)
                if not self.classesSurfacesAndTimes:
                    return self
                needsUpdate = True
                self.fill((252, 252, 252))
                self.blit(self.backgroundImage, (0, 0))
                # Stack all but the newest surface from the top of the screen.
                for classesSurfaceAndTime in self.classesSurfacesAndTimes[:-1]:
                    self.blit(classesSurfaceAndTime[0],
                              ((self.classSurface_widthBuffer),
                               (self.height - maxHeight)))
                    maxHeight -= classesSurfaceAndTime[0].get_rect().height
                newestTimeSlotHeight = self.classesSurfacesAndTimes[-1][
                    0].get_rect().height
                # Only draw the newest surface if it still fits, then keep
                # pulling further time slots until one no longer fits.
                # NOTE(review): the back-filled surfaces are blitted but not
                # appended to classesSurfacesAndTimes here -- confirm they are
                # tracked elsewhere (e.g. inside CreateClassesSurface).
                if newestTimeSlotHeight < maxHeight:
                    self.blit(self.classesSurfacesAndTimes[-1][0],
                              ((self.classSurface_widthBuffer),
                               (self.height - maxHeight)))
                    maxHeight -= newestTimeSlotHeight
                    for i in range(len(self.todaysTimeSlots)):
                        nextClasses = self.GetNextTimeSlotClasses()
                        nextClassesSurface = self.CreateClassesSurface(
                            nextClasses)
                        if nextClassesSurface.get_rect().height > maxHeight:
                            break
                        self.blit(nextClassesSurface,
                                  ((self.classSurface_widthBuffer),
                                   (self.height - maxHeight)))
                        maxHeight -= nextClassesSurface.get_rect().height
        else:
            if self.todaysClasses:
                # Nothing rendered yet but classes remain: build the screen
                # from scratch, one time slot at a time, while they fit.
                self.noMoreClassesFlag = False
                needsUpdate = True
                self.fill((252, 252, 252))
                self.blit(self.backgroundImage, (0, 0))
                for i in range(len(self.todaysTimeSlots)):
                    nextClasses = self.GetNextTimeSlotClasses()
                    nextClassesSurface = self.CreateClassesSurface(nextClasses)
                    if nextClassesSurface.get_rect().height > maxHeight:
                        break
                    self.blit(nextClassesSurface,
                              ((self.classSurface_widthBuffer),
                               (self.height - maxHeight)))
                    maxHeight -= nextClassesSurface.get_rect().height
            else:
                # No classes left today: draw the empty screen exactly once.
                if not self.noMoreClassesFlag:
                    self.noMoreClassesFlag = True
                    needsUpdate = True
                    noClassesSurface = self.CreateClassesSurface([])
                    self.fill((252, 252, 252))
                    self.blit(self.backgroundImage, (0, 0))
                    self.blit(noClassesSurface, (0, 0))
        return needsUpdate

    # Should be called as the clock strikes midnight.
    def LoadTodaysClasses(self):
        """Rebuild self.todaysClasses with today's not-yet-expired LEC
        meetings, sorted by start time."""
        print('LoadTodaysClasses')
        now = datetime.now()
        weekday_codes = ['M', 'T', 'W', 'Th', 'F', 'Sat', 'S']
        day_code = weekday_codes[datetime.today().weekday()]
        raw_meetings = LoadJsonToList(path.join(self.dir_path,
                                                self.scheduleFile))
        kept = []
        for meeting in raw_meetings[1:]:
            if not DoesClassMeet(day_code, meeting, 'LEC'):
                continue
            start_text = ConvertMilitaryToStd(meeting['LEC']['Start'])
            slot = datetime.combine(
                now.date(),
                datetime.strptime(start_text, '%I:%M %p').time())
            # A class stays listed for timeSlotDisplayBuffer minutes past its
            # start time.
            if slot + timedelta(minutes=self.timeSlotDisplayBuffer) >= now:
                kept.append(meeting)
        self.todaysClasses = sorted(kept, key=lambda k: k['LEC']['Start'])

    # Should be called as the clock strikes midnight.
    def LoadTodaysTimeSlots(self):
        """Rebuild self.todaysTimeSlots from self.todaysClasses.

        Collapses consecutive duplicate start times into a single slot;
        todaysClasses is already sorted by start time, so this yields the
        distinct slots in order.  (Unused weekday bookkeeping from the
        original -- `today` and `daysOfWeek` -- was computed but never read
        and has been removed.)
        """
        print('LoadTodaysTimeSlots')
        self.todaysTimeSlots = []
        for meeting in self.todaysClasses:
            nextTimeSlot = ConvertMilitaryToStd(meeting['LEC']['Start'])
            if not self.todaysTimeSlots or self.todaysTimeSlots[
                    -1] != nextTimeSlot:
                self.todaysTimeSlots.append(nextTimeSlot)

    def GetNextTimeSlotClasses(self):
        """Pop and return the classes of the next time slot, sorted by room.

        Consumes the front entry of self.todaysTimeSlots and removes the
        matching meetings from the front of self.todaysClasses.  Relies on
        todaysClasses being sorted by start time: the `break` stops at the
        first non-matching meeting, and pop(0) on the original list is safe
        because all matches sit at the front (iteration is over a copy).

        :return: list of the slot's meeting dicts, sorted by 'LEC'->'Room'
        """
        print('GetNextTimeSlotClasses')
        nextTimeSlotClasses = []
        if self.todaysTimeSlots:
            for meeting in list(self.todaysClasses):
                if ConvertMilitaryToStd(
                        meeting['LEC']['Start']) == self.todaysTimeSlots[0]:
                    nextTimeSlotClasses.append(meeting)
                    self.todaysClasses.pop(0)
                else:
                    break
            self.todaysTimeSlots.pop(0)
        nextTimeSlotClasses = sorted(nextTimeSlotClasses,
                                     key=lambda k: k['LEC']['Room'])
        # NOTE(review): debug print of the full meeting list left in place --
        # consider removing or routing through logging.
        print(nextTimeSlotClasses)
        return nextTimeSlotClasses

    def CreateClassSurface(self, meeting, bg, width, height):
        """Render a single class row onto a new pygame Surface.

        :param meeting: dict with 'LEC' (containing 'Room' and 'Start'),
                        'Instructor' and 'Title' keys.
        :param bg: background fill color for the row.
        :param width: row width in pixels.
        :param height: row height in pixels.
        :return: a Surface showing the floor map thumbnail, class code,
                 title and instructor.
        """
        room = meeting['LEC']['Room']
        instructor = meeting['Instructor']
        title = meeting['Title']
        # Titles begin with a class code like "CS-101"; split it off so the
        # code and the remaining title render in different fonts.
        classCodePattern = re.compile(r'^\w{2,5}-\d{1,3}')
        classCode = classCodePattern.search(title).group()
        title = title.replace(classCode + ' ', '')
        # Chooses which png to load based on number.
        # NOTE(review): int(room) raises ValueError for non-numeric rooms
        # such as "101A" — confirm rooms are always plain integers.
        roomSurface = image.load(
            path.join(self.assets_path,
                      '2ndFloor2.png')) if int(room) < 200 else image.load(
                          path.join(self.assets_path, '1stFloor2.png'))
        # BUG FIX: convert_alpha() returns a NEW surface; the original code
        # discarded the return value, making the call a no-op.
        roomSurface = roomSurface.convert_alpha()
        floorSurfaceDimensions = (
            int(width * self.classSurface_floorSurface_widthRatio),
            int(height - (2 * self.classSurface_floorSurface_buffer[1])))
        roomSurface = transform.smoothscale(roomSurface,
                                            floorSurfaceDimensions)
        roomSurfaceFont = font.Font(*self.classSurface_roomNumberFont)
        roomSurfaceText = roomSurfaceFont.render(room, True, (255, 255, 255))
        # Center the room number on the floor-map thumbnail.
        roomSurface.blit(roomSurfaceText, (roomSurface.get_rect().centerx -
                                           roomSurfaceText.get_rect().centerx,
                                           roomSurface.get_rect().centery -
                                           roomSurfaceText.get_rect().centery))
        classCodeFont = font.Font(*self.classSurface_classCodeFont)
        classCodeText = classCodeFont.render(classCode, True, (0, 0, 0))
        classTitleFont = font.Font(*self.classSurface_classTitleFont)
        classTitleText = classTitleFont.render(title, True, (0, 0, 0))
        classInstructorFont = font.Font(*self.classSurface_classInstructorFont)
        classInstructorText = classInstructorFont.render(
            instructor, True, (0, 0, 0))
        classSurface = Surface((width, height))
        classSurface.fill(bg)
        # Floor map on the left, code and title vertically centered to its
        # right, instructor right-aligned.
        classSurface.blit(roomSurface, self.classSurface_floorSurface_buffer)
        classSurface.blit(classCodeText,
                          (roomSurface.get_rect().right +
                           self.classSurface_classCodeLeftBuffer,
                           classSurface.get_rect().centery -
                           classCodeText.get_rect().centery))
        classSurface.blit(classTitleText,
                          (roomSurface.get_rect().right +
                           self.classSurface_classCodeLeftBuffer +
                           self.classSurface_classTitleLeftBuffer,
                           classSurface.get_rect().centery -
                           classTitleText.get_rect().centery))
        classSurface.blit(classInstructorText,
                          (classSurface.get_rect().width -
                           classInstructorText.get_rect().width - 10,
                           classSurface.get_rect().centery +
                           classTitleText.get_rect().centery -
                           classInstructorText.get_rect().height - 4))
        return classSurface

    def CreateClassesSurface(self, classes):
        """Build the surface for one time slot's worth of classes.

        Renders a header with the slot's start time (or a "No More
        Classes Today" banner when *classes* is empty) followed by one
        row per class.  For a real time slot, the finished surface is
        also appended to self.classesSurfacesAndTimes together with its
        expiry time (slot start + self.timeSlotDisplayBuffer minutes).

        :param classes: list of meeting dicts sharing the same start time.
        :return: the composed pygame Surface.
        """
        print('CreateClassesSurface')
        if classes:
            timeSurfaceText = ConvertMilitaryToStd(classes[0]['LEC']['Start'])
        else:
            timeSurfaceText = 'No More Classes Today'
        timeSurfaceFont = font.Font(*self.timeSlotFont)
        timeSurface = timeSurfaceFont.render(timeSurfaceText, True,
                                             (51, 51, 51))
        # Row height is whatever vertical space remains after the header
        # and inter-row buffers, split across the configured row count.
        classSurfaceHeight = (self.height -
                              (timeSurface.get_rect().height * 2) -
                              ((self.scheduleDisplay_numberOfClasses - 1) *
                               self.classSurface_heightBuffer)
                              ) / self.scheduleDisplay_numberOfClasses
        classesSurfaceHeight = timeSurface.get_rect().height + (
            len(classes) *
            (classSurfaceHeight + self.classSurface_heightBuffer))
        classesSurface = Surface(
            (self.width -
             (self.classSurface_widthBuffer * 2), classesSurfaceHeight),
            SRCALPHA, 32)
        classesSurface.blit(timeSurface, (self.classSurface_widthBuffer, 0))
        for i, meeting in enumerate(classes):
            nextClass = self.CreateClassSurface(
                meeting, self.classSurface_bgColors[i % 2],
                self.width - (2 * self.classSurface_widthBuffer),
                classSurfaceHeight)
            classesSurface.blit(nextClass,
                                (0, timeSurface.get_rect().height +
                                 (nextClass.get_rect().height +
                                  self.classSurface_heightBuffer) * i))
        # BUG FIX: convert_alpha() returns a NEW surface; the original code
        # discarded the return value, making the call a no-op.  Reassign
        # before the surface is cached below and returned.
        classesSurface = classesSurface.convert_alpha()
        if timeSurfaceText != 'No More Classes Today':
            currentTime = datetime.now()
            timeSlot = datetime.combine(
                currentTime.date(),
                datetime.strptime(timeSurfaceText, '%I:%M %p').time())
            timeSlot = timeSlot + timedelta(minutes=self.timeSlotDisplayBuffer)
            self.classesSurfacesAndTimes.append((classesSurface, timeSlot))
        return classesSurface

    def UpdateJson(self):
        """Refresh the on-disk schedule JSON from the live class data.

        Fetches the current term, rebuilds the class list for this
        building/term/location, and rewrites the schedule file only when
        its contents actually changed.  Network timeouts abort the
        update and leave the existing file untouched (best effort).
        """
        print('UpdateJson')
        try:
            currentTermDict = GetCurrentTerm()
        except TimeoutError:
            print("TIMEOUT CURRENTTERM")
            return
        schedulePath = path.join(self.dir_path, self.scheduleFile)
        if currentTermDict['Term']:
            self.term = currentTermDict['Term']
            # Keep the recurring CompileSubjects job pointed at the new term.
            self.scheduler.modify_job('CompileSubjects01',
                                      args=[
                                          self.building, self.term,
                                          self.location,
                                          path.join(self.dir_path,
                                                    self.compiledSubjectsFile)
                                      ])
            subjectsPath = path.join(self.dir_path, self.compiledSubjectsFile)
            try:
                newClassesList = CreateClassesList(self.building, self.term,
                                                   self.location, subjectsPath)
            except TimeoutError:
                print("TIMEOUT NEWCLASSLIST")
                return
            # Element 0 of the schedule file is always the term descriptor.
            newClassesList.insert(0, currentTermDict)
            if path.isfile(schedulePath) and path.getsize(schedulePath) > 0:
                currentClassesList = LoadJsonToList(schedulePath)
                # Only rewrite when something actually changed.
                if newClassesList != currentClassesList:
                    DumpListToJson(newClassesList, schedulePath)
            else:
                open(schedulePath, 'w').close()
                DumpListToJson(newClassesList, schedulePath)
        else:
            # BUG FIX: the original passed self.scheduleFile (a bare
            # filename) here, writing relative to the CWD instead of the
            # dir_path-joined path used everywhere else in this method.
            emptyList = [currentTermDict]
            DumpListToJson(emptyList, schedulePath)

    def InitializeJsonData(self):
        """Ensure the schedule JSON exists and covers the current term.

        Compiles the subjects file if missing, then validates the
        existing schedule file: if its term descriptor is empty, the
        term's date range no longer contains today, or the file holds no
        classes beyond the descriptor, the schedule is refreshed via
        UpdateJson().  The numbered prints are debug markers identifying
        which refresh path fired.
        """
        schedulePath = path.join(self.dir_path, self.scheduleFile)
        if not path.isfile(path.join(self.dir_path,
                                     self.compiledSubjectsFile)):
            CompileSubjectsInBuilding(
                self.building, self.term, self.location,
                path.join(self.dir_path, self.compiledSubjectsFile))
        if path.isfile(schedulePath) and path.getsize(schedulePath) > 0:
            # Element 0 of the schedule file is the term descriptor.
            termDict = LoadJsonToList(schedulePath)[0]
            if termDict['Term']:
                self.term = termDict['Term']
                self.scheduler.modify_job(
                    'CompileSubjects01',
                    args=[
                        self.building, self.term, self.location,
                        path.join(self.dir_path, self.compiledSubjectsFile)
                    ])
                currentDate = datetime.now()
                startDate = datetime.strptime(termDict['Start'], '%m/%d/%Y')
                endDate = datetime.strptime(termDict['End'], '%m/%d/%Y')
                # Refresh when today falls outside the stored term's range.
                if not (startDate <= currentDate <= endDate):
                    self.UpdateJson()
                    print(1)
                # Refresh when the file contains only the term descriptor.
                if len(LoadJsonToList(schedulePath)) <= 1:
                    self.UpdateJson()
                    print(2)
            else:
                self.UpdateJson()
                print(3)
        else:
            self.UpdateJson()
            print(4)