def main():
    """Entry point: set up logging and run the billing cron scheduler."""
    config.parse_args(sys.argv)
    logging.setup(CONF, "billing")

    scheduler = BlockingScheduler(
        executors={
            'default': ThreadPoolExecutor(10),
            'processpool': ProcessPoolExecutor(3),
        },
        job_defaults={
            'coalesce': True,
            'max_instances': 2,
            'misfire_grace_time': 3600,
        },
        timezone=CONF.timezone_local)

    # Only register the job if it is not already present (e.g. from a
    # persistent jobstore).
    if scheduler.get_job("bill_generation") is None:
        scheduler.add_job(
            tick,
            'cron',
            month=CONF.scheduler_time.bill_generation_month,
            day=CONF.scheduler_time.bill_generation_day,
            hour=CONF.scheduler_time.bill_generation_hour,
            minute=CONF.scheduler_time.bill_generation_minute,
            id="bill_generation")
    scheduler.start()
Example #2
0
class App:
    """Watches StudentWeb for new grades and sends notifications.

    Reads settings from app.yml, normalizes the notification channels,
    and drives a Firefox WebDriver from a BlockingScheduler.
    """

    def __init__(self):
        self.logger = logging.getLogger(App.__name__)
        self.logger.info('launched Karakteraz')

        try:
            with open('app.yml', 'r') as stream:
                # FIX: safe_load -- yaml.load without an explicit Loader can
                # construct arbitrary Python objects from the config file.
                self.configuration = yaml.safe_load(stream)['karakteraz']
        except yaml.YAMLError:
            self.logger.exception('configuration format error')
            sys.exit(1)
        except FileNotFoundError:
            self.logger.exception('configuration not found')
            sys.exit(1)

        # Disable a notification channel entirely if any of its settings is
        # missing (clearer than the original sum()-over-None counting, same
        # behavior).
        for channel in ('email', 'telegram'):
            settings = self.configuration['notification'][channel]
            if any(value is None for value in settings.values()):
                self.configuration['notification'][channel] = None

        self.configuration['watch-list'] = [x.upper() for x in self.configuration['watch-list']]
        self.configuration['results-page'] = 'https://fsweb.no/studentweb/resultater.jsf'
        self.logger.info(
            'installed configuration:\n{}'.format(yaml.dump(self.configuration, default_flow_style=False)))

        # Poll interval in seconds, parsed from a human-friendly duration string.
        self.interval = durationpy.from_str(self.configuration['frequency']).total_seconds()
        self.driver = webdriver.Firefox()
        self.driver.wait = WebDriverWait(self.driver, 10)
        self.scheduler = BlockingScheduler()

    def start(self):
        """Schedule grade fetching plus the night-time pause/resume cron."""
        # At 06:00 and 23:00 decide whether to pause or resume fetching.
        self.scheduler.add_job(self.trigger_schedule, trigger='cron', hour='6,23')
        job = self.scheduler.add_job(fetch_grades, id='fetch', trigger='interval', seconds=self.interval,
                                     args=[self.driver, self.logger, self.configuration], coalesce=True)

        # If launched during the night window, start paused.
        time = datetime.now()
        if time.hour >= 23 or time.hour < 6:
            job.pause()

        atexit.register(lambda: self.scheduler.shutdown(wait=False))
        atexit.register(lambda: self.driver.quit())
        self.scheduler.start()

    def trigger_schedule(self):
        """Resume the fetch job in the morning, pause it at night."""
        job = self.scheduler.get_job('fetch')
        if datetime.now().hour == 6:
            job.resume()
            self.logger.info('ready for a new days work. Resuming scheduled tasks')
        else:
            job.pause()
            self.logger.info('sleeping for the night.')
Example #3
0
class Updater:
    """First-run interactive setup + scheduler for periodic no-ip DNS updates."""

    def __init__(self):
        logging.info('Reading configuration')
        self.configuration = Configuration()
        # First run: no stored credentials yet -> prompt, persist, and exit.
        # FIX: original used `len(...) is 0` (identity test against an int).
        if not self.configuration['no-ip-authorization']:
            import base64  # local import; only needed on this first-run path
            username = input("Enter the no-ip username: ")
            password = input("Enter the password: ")
            # NOTE(review): this line was redacted in the original source and
            # has been reconstructed -- no-ip uses HTTP basic auth, i.e.
            # base64("user:password"); confirm against the noip_update() API.
            self.configuration['no-ip-authorization'] = base64.b64encode(
                "{}:{}".format(username, password).encode('utf-8')).decode()
            self.configuration['no-ip-hostname'] = input(
                "Enter the hostname (ex. mytest.testdomain.com): ")
            self.configuration['no-ip-update-interval-minutes'] = \
                input("Enter the interval to no-ip update in minutes: ")
            self.configuration.save_configuration()
            print("Configuration was saved. Install application on /opt/No-IP-Updater/ and " +
                  "use 'systemctl start noip-updater.service'")
            exit(0)

        logging.info('Creating scheduler')
        jobstores = {
            'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
        }
        # FIX: jobstores must be passed by keyword -- the first positional
        # parameter of BlockingScheduler is the global config dict, so the
        # original silently discarded the SQLAlchemy job store.
        self.scheduler = BlockingScheduler(jobstores=jobstores)

        if self.scheduler.get_job(job_id='no-ip-update-task') is None:
            logging.info("Registering job 'no-ip-update-task'")
            self.scheduler.add_job(
                noip_update,
                'interval', [
                    self.configuration['no-ip-authorization'],
                    self.configuration['no-ip-hostname']
                ],
                id='no-ip-update-task',
                max_instances=1,
                replace_existing=True,
                minutes=int(
                    self.configuration['no-ip-update-interval-minutes']))

        # Register listener
        logging.info('Registering listener')
        self.scheduler.add_listener(task_listener,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        logging.info('Registering quit task')
        atexit.register(self.exit_handler)

        logging.info('Starting scheduler')
        self.scheduler.start()

    def exit_handler(self):
        """atexit hook: stop the scheduler and flush logging."""
        logging.info('Quitting scheduler')
        self.scheduler.shutdown()
        logging.shutdown()
Example #4
0
class abstract_schedule(metaclass=ABCMeta):
    """Keeps a BlockingScheduler in sync with a backup policy.

    Subclasses supply the desired job set via getBackupPolicy(); autoAddJob
    runs every 5 seconds and reconciles the scheduler against it.
    """

    def __init__(self):
        self.sched = BlockingScheduler()

    def lstCronJob(self, job_id=None):
        """Return {job_id: job} for all jobs, or for one requested id.

        FIX: the original stored None under a missing job_id, which made the
        result truthy and later crashed delCronJob with JobLookupError; a
        missing id now yields an empty dict.
        """
        result = {}
        if not job_id:
            for j in self.sched.get_jobs():
                result[j.id] = j
        else:
            job = self.sched.get_job(job_id)
            if job is not None:
                result[job_id] = job
        return result

    def delCronJob(self, job_id):
        """Remove a job by id; returns True on success, None if not found."""
        if not self.lstCronJob(job_id):
            sys.stdout.write("Job %s not found" % job_id)
        else:
            self.sched.remove_job(job_id)
            sys.stdout.write("Job %s 删除成功!" % job_id)
            return True

    def addCronJob(self, job_id, func, policy, args):
        """Register func under job_id with a CronTrigger built from policy."""
        self.sched.add_job(func, CronTrigger(**policy), args=args, id=job_id)

    def start(self):
        print("123123")  # leftover debug output, kept to preserve behavior
        self.sched.add_job(self.autoAddJob, IntervalTrigger(seconds=5), id="autoAddJob")
        self.sched.start()

    def autoAddJob(self):
        """Reconcile currently scheduled jobs with the backup policy."""
        history_jobs = self.lstCronJob()
        print(history_jobs, 'history_jobs')

        current_jobs = self.getBackupPolicy()
        print(current_jobs, 'current_jobs')

        # Jobs present in the policy but not yet scheduled.
        only_current_jobs = set(current_jobs.keys()).difference(set(history_jobs.keys()))
        print(only_current_jobs, 'only_current_jobs')

        # Jobs scheduled but no longer present in the policy.
        only_history_jobs = set(history_jobs.keys()).difference(set(current_jobs.keys()))
        print(only_history_jobs, 'only_history_jobs')

        for j in only_history_jobs:
            if j == 'autoAddJob':
                continue  # never remove the reconciliation job itself
            self.delCronJob(job_id=j)

        for j in only_current_jobs:
            func = current_jobs[j].pop('func')
            args = current_jobs[j].pop('args')
            policy = current_jobs[j]
            self.addCronJob(job_id=j, func=func, policy=policy, args=args)

    @abstractmethod
    def getBackupPolicy(self):
        """Return {job_id: {'func': callable, 'args': [...], **cron_fields}}."""
        pass
Example #5
0
    'redis': RedisJobStore(host='172.16.0.121',port=6379,db=0),      #用redis作backend
}

# Inspect the redis jobstore directly with:
# ZRANGE apscheduler.run_times 0 1
# HGETALL apscheduler.jobs

executors = {
    'default': ThreadPoolExecutor(10),      # default thread pool size
    'processpool': ProcessPoolExecutor(3)   # default process pool size
}
sched = BlockingScheduler(jobstores=jobstores, executors=executors)

def aps_test():
    """Demo job: print the current timestamp followed by the marker 'H'."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(stamp, 'H')

# @scheduler.scheduled_job('interval', seconds=3,id='job003')
# def test03():
#     print('test03......')

# Add a job: fire every 5 seconds, persisted in the redis jobstore
sched.add_job(func=aps_test, trigger='cron', second='*/5',id='job001',jobstore='redis',replace_existing=True)

# Inspect the job's state
print(sched.get_job(job_id='job001'))

# Removing a job
# scheduler.remove_job('job001')
# print(scheduler.get_job(job_id='job001'))

sched.start()
Example #6
0
        del resource_dict['updated_at']
        del resource_dict['deleted_at']
        '''append item from   using table;'''
        resource_dict['using_id'] = each['using_id']
        resource_dict['started_at'] = each['started_at']
        resource_dict['ended_at'] = each['ended_at']

        #print "DDD %s"%resource_dict;
        send_mq_message_pconn(connection, exchange, resource_dict)


if __name__ == "__main__":
    # FIX: dropped the unused wildcard import of apscheduler.schedulers; the
    # explicit imports below cover everything this script uses.
    from apscheduler.schedulers.blocking import BlockingScheduler
    from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor

    executors = {
        'default': ThreadPoolExecutor(10),
        'processpool': ProcessPoolExecutor(3)
    }
    job_defaults = {'coalesce': False, 'max_instances': 60}

    # FIX: renamed the 'schedudler' typo.
    scheduler = BlockingScheduler(executors=executors,
                                  job_defaults=job_defaults)
    # Fire resend_mq at every 5th minute of the hour (:05, :10, ... :55).
    scheduler.add_job(resend_mq,
                      'cron',
                      minute='5,10,15,20,25,30,35,40,45,50,55',
                      id="five_minute")
    # FIX: Python 2 print statement -> print() function.
    print(scheduler.get_job("five_minute"))
    scheduler.start()
Example #7
0
class FreezerScheduler(object):
    """Polls the freezer API (or local job files) and schedules backup jobs."""

    def __init__(self, apiclient, interval, job_path):
        # config_manager
        self.client = apiclient
        self.freezerc_executable = spawn.find_executable('freezerc')
        if self.freezerc_executable is None:
            # Needed in the case of a non-activated virtualenv
            self.freezerc_executable = spawn.find_executable(
                'freezerc', path=':'.join(sys.path))
        self.job_path = job_path
        self._client = None
        self.lock = threading.Lock()
        self.execution_lock = threading.Lock()
        job_defaults = {
            'coalesce': True,
            'max_instances': 1
        }
        self.scheduler = BlockingScheduler(job_defaults=job_defaults)
        if self.client:
            self.scheduler.add_job(self.poll, 'interval',
                                   seconds=interval, id='api_poll')

        # Expose the scheduler's add/remove directly as scheduler API.
        self.add_job = self.scheduler.add_job
        self.remove_job = self.scheduler.remove_job
        self.jobs = {}

    def get_jobs(self):
        """Return job documents from the API (mirrored to disk) or from disk."""
        if self.client:
            job_doc_list = utils.get_active_jobs_from_api(self.client)
            try:
                utils.save_jobs_to_disk(job_doc_list, self.job_path)
            except Exception as e:
                logging.error('Unable to save jobs to {0}. '
                              '{1}'.format(self.job_path, e))
            return job_doc_list
        else:
            return utils.get_jobs_from_disk(self.job_path)

    def start_session(self, session_id, job_id, session_tag):
        """Start a backup session via the API; raises if the API is not used."""
        if self.client:
            return self.client.sessions.start_session(session_id,
                                                      job_id,
                                                      session_tag)
        else:
            raise Exception("Unable to start session: api not in use.")

    def end_session(self, session_id, job_id, session_tag, result):
        """End a backup session via the API; raises if the API is not used."""
        if self.client:
            return self.client.sessions.end_session(session_id,
                                                    job_id,
                                                    session_tag,
                                                    result)
        else:
            raise Exception("Unable to end session: api not in use.")

    def upload_metadata(self, metadata_doc):
        """Upload backup metadata to the API (no-op without a client)."""
        if self.client:
            self.client.backups.create(metadata_doc)

    def start(self):
        """Register with the API, do an initial poll, then block on the scheduler."""
        utils.do_register(self.client)
        self.poll()
        self.scheduler.start()

    def update_job(self, job_id, job_doc):
        """Push a job document update to the API; errors are logged, not raised."""
        if self.client:
            try:
                return self.client.jobs.update(job_id, job_doc)
            except Exception as e:
                logging.error("[*] Job update error: {0}".format(e))

    def update_job_status(self, job_id, status):
        """Update only the job_schedule.status field of a job."""
        doc = {'job_schedule': {'status': status}}
        self.update_job(job_id, doc)

    def is_scheduled(self, job_id):
        """True if the scheduler currently knows the given job id."""
        return self.scheduler.get_job(job_id) is not None

    def create_job(self, job_doc):
        """Create and register a local Job from its document."""
        job = Job.create(self, self.freezerc_executable, job_doc)
        if job:
            self.jobs[job.id] = job
            logging.info("Created job {0}".format(job.id))
        return job

    def poll(self):
        """Sync local jobs with the job documents from the API/disk."""
        try:
            work_job_doc_list = self.get_jobs()
        except Exception as e:
            logging.error("[*] Unable to get jobs: {0}".format(e))
            return

        work_job_id_list = []

        # create job if necessary, then let it process its events
        for job_doc in work_job_doc_list:
            job_id = job_doc['job_id']
            work_job_id_list.append(job_id)
            job = self.jobs.get(job_id, None) or self.create_job(job_doc)
            if job:
                job.process_event(job_doc)

        # request removal of any job that has been removed in the api
        # FIX: dict.iteritems() is Python 2 only and raises AttributeError on
        # Python 3; items() behaves the same here.
        for job_id, job in self.jobs.items():
            if job_id not in work_job_id_list:
                job.remove()

        remove_list = [job_id for job_id, job in self.jobs.items()
                       if job.can_be_removed()]

        for k in remove_list:
            self.jobs.pop(k)

    def stop(self):
        """Best-effort scheduler shutdown (ignores 'not running' errors)."""
        try:
            self.scheduler.shutdown(wait=False)
        except Exception:
            # FIX: narrowed the bare except; still deliberately best-effort.
            pass

    def reload(self):
        logging.warning("reload not supported")
Example #8
0
if __name__ == "__main__":
    executors = {
        'default': ThreadPoolExecutor(10),
        'processpool': ProcessPoolExecutor(3)
    }
    job_defaults = {'coalesce': False, 'max_instances': 60}

    log_init()
    # FIX: renamed the 'schedudler' typo.
    scheduler = BlockingScheduler(executors=executors,
                                  job_defaults=job_defaults)

    # Run collect_res at every hour of the day.
    scheduler.add_job(collect_res, 'cron', hour='0-23', id="hourly")
    # FIX: Python 2 print statement -> print() function.
    print(scheduler.get_job("hourly"))

    scheduler.start()
"""
    #help function 
    #schedudler.add_job(tick, 'cron', second='5,10,15,20,25,30,35,40,45,50,55',id="five_second")
    #print schedudler.get_job("five_second")

    #print help(schedudler)

    #@schedudler.cron_schedule(second='*', day_of_week='0-7', hour='0-23')  
    #def quote_send_sh_job():  
    #    print 'a simple cron job start at', datetime.datetime.now()  

"""
Example #9
0
# Run at an exact point in time
# scheduler.add_job(job1, 'date', run_date=datetime(2020, 8, 8, 16, 30, 5), id='job1')
# scheduler.add_job(job1, 'date', run_date='2020-8-8 16:30:5', id='job1')

# Run every 5 seconds
scheduler.add_job(job1, 'interval', seconds=5, id='job1')
scheduler.add_job(job1, 'interval', seconds=5, id='job2')

# Remove a job -- must be done before start()
# scheduler.remove_job('job1')

# Pause a job
# scheduler.pause_job('job1')

# Resume a job
# scheduler.resume_job('job1')

# List all jobs
jobs = scheduler.get_jobs()
print(jobs)

# Look up a single job by id
job = scheduler.get_job('job1')
print(job)

scheduler.start()

# Shut the scheduler down; wait=False means don't wait for running jobs
scheduler.shutdown()
scheduler.shutdown(wait=False)
Example #10
0
        send_mq_message_pconn(connection, exchange, each)
        save_to_resource_mq_buffer_pconn(session, each)
        save_to_using_mq_buffer_pconn(session, each['mq_uuid'],
                                      each['resource_id'])
        if count == 1024:
            session.commit()
            count = 0

    session.close()
    print('end of  collecting   resource at %s' % datetime.now())


if __name__ == "__main__":
    executors = {
        'default': ThreadPoolExecutor(10),
        'processpool': ProcessPoolExecutor(3)
    }
    job_defaults = {'coalesce': False, 'max_instances': 60}

    log_init()
    # FIX: renamed the 'schedudler' typo.
    scheduler = BlockingScheduler(executors=executors,
                                  job_defaults=job_defaults)

    # Fire at second 0 of every minute.
    scheduler.add_job(collect_cdn_res, 'cron', second='0', id="one_miniuts")
    # FIX: Python 2 print statements -> print() function.
    print(scheduler.get_job("one_miniuts"))

    # FIX: the original used day='0-7', but the cron 'day' (day-of-month)
    # field only accepts 1-31, so add_job would raise ValueError. A
    # daily-style job was presumably intended, hence day_of_week='0-6'
    # (Mon-Sun) -- TODO confirm the intended schedule.
    scheduler.add_job(collect_cdn_res, 'cron', day_of_week='0-6', id="dailiy")
    print(scheduler.get_job("dailiy"))

    scheduler.start()
Example #11
0
class FreezerScheduler(object):
    """Polls the freezer API (or local job files) and schedules backup jobs."""

    def __init__(self, apiclient, interval, job_path):
        # config_manager
        self.client = apiclient
        self.freezerc_executable = spawn.find_executable('freezer-agent')
        if self.freezerc_executable is None:
            # Needed in the case of a non-activated virtualenv
            self.freezerc_executable = spawn.find_executable('freezer-agent',
                                                             path=':'.join(
                                                                 sys.path))
        LOG.debug('Freezer-agent found at {0}'.format(
            self.freezerc_executable))
        self.job_path = job_path
        self._client = None
        self.lock = threading.Lock()
        self.execution_lock = threading.Lock()
        job_defaults = {'coalesce': True, 'max_instances': 1}
        self.scheduler = BlockingScheduler(job_defaults=job_defaults)
        if self.client:
            self.scheduler.add_job(self.poll,
                                   'interval',
                                   seconds=interval,
                                   id='api_poll')

        # Expose the scheduler's add/remove directly as scheduler API.
        self.add_job = self.scheduler.add_job
        self.remove_job = self.scheduler.remove_job
        self.jobs = {}

    def get_jobs(self):
        """Return job documents from the API (mirrored to disk) or from disk."""
        if self.client:
            job_doc_list = utils.get_active_jobs_from_api(self.client)
            try:
                utils.save_jobs_to_disk(job_doc_list, self.job_path)
            except Exception as e:
                LOG.error('Unable to save jobs to {0}. '
                          '{1}'.format(self.job_path, e))
            return job_doc_list
        else:
            return utils.get_jobs_from_disk(self.job_path)

    def start_session(self, session_id, job_id, session_tag):
        """Start a backup session via the API; raises if the API is not used."""
        if self.client:
            return self.client.sessions.start_session(session_id, job_id,
                                                      session_tag)
        else:
            raise Exception("Unable to start session: api not in use.")

    def end_session(self, session_id, job_id, session_tag, result):
        """End a backup session via the API; raises if the API is not used."""
        if self.client:
            return self.client.sessions.end_session(session_id, job_id,
                                                    session_tag, result)
        else:
            raise Exception("Unable to end session: api not in use.")

    def upload_metadata(self, metadata_doc):
        """Upload backup metadata to the API (no-op without a client)."""
        if self.client:
            self.client.backups.create(metadata_doc)

    def start(self):
        """Register with the API, do an initial poll, then block on the scheduler."""
        utils.do_register(self.client)
        self.poll()
        self.scheduler.start()

    def update_job(self, job_id, job_doc):
        """Push a job document update to the API; errors are logged, not raised."""
        if self.client:
            try:
                return self.client.jobs.update(job_id, job_doc)
            except Exception as e:
                LOG.error("[*] Job update error: {0}".format(e))

    def update_job_status(self, job_id, status):
        """Update only the job_schedule.status field of a job."""
        doc = {'job_schedule': {'status': status}}
        self.update_job(job_id, doc)

    def is_scheduled(self, job_id):
        """True if the scheduler currently knows the given job id."""
        return self.scheduler.get_job(job_id) is not None

    def create_job(self, job_doc):
        """Create and register a local Job from its document."""
        job = scheduler_job.Job.create(self, self.freezerc_executable, job_doc)
        if job:
            self.jobs[job.id] = job
            LOG.info("Created job {0}".format(job.id))
        return job

    def poll(self):
        """Sync local jobs with the job documents from the API/disk."""
        try:
            work_job_doc_list = self.get_jobs()
        except Exception as e:
            LOG.error("[*] Unable to get jobs: {0}".format(e))
            return

        work_job_id_list = []

        # create job if necessary, then let it process its events
        for job_doc in work_job_doc_list:
            job_id = job_doc['job_id']
            work_job_id_list.append(job_id)
            job = self.jobs.get(job_id, None) or self.create_job(job_doc)
            if job:
                job.process_event(job_doc)

        # request removal of any job that has been removed in the api
        # FIX: plain dict.items() -- equivalent to six.iteritems on Python 3
        # and consistent with the .items() call a few lines below.
        for job_id, job in self.jobs.items():
            if job_id not in work_job_id_list:
                job.remove()

        remove_list = [
            job_id for job_id, job in self.jobs.items()
            if job.can_be_removed()
        ]

        for k in remove_list:
            self.jobs.pop(k)

    def stop(self):
        """Best-effort scheduler shutdown (ignores 'not running' errors)."""
        try:
            self.scheduler.shutdown(wait=False)
        except Exception:
            pass

    def reload(self):
        LOG.warning("reload not supported")
Example #12
0
__date__ = "2017-12-26"
'''
python3使用apscheduler判断是否存在job
'''

from apscheduler.schedulers.blocking import BlockingScheduler
from datetime import datetime


def job():
    """Print the current timestamp, then dump the scheduler's job list."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(stamp)
    sched.print_jobs()


# Create the BlockingScheduler
sched = BlockingScheduler()
sched.add_job(job, 'interval', seconds=3, id='test_job1')
sched.add_job(job, 'interval', seconds=50, id='test_job2')
print(sched.get_job('test_job1'))
print(sched.get_job('test_job1111'))
print(type(sched.get_job('test_job1111')))
if sched.get_job('test_job1111') is None:
    print("haha")
if sched.get_job('test_job2') is not None:
    print("lala")
    # run the job's callable immediately, outside the scheduler
    sched.get_job('test_job2').func()
    print("lala")
sched.start()
print('hi')
# sched.modify_job('test_job1', seconds=6)
Example #13
0
class SchedUtility(object, metaclass=Singleton):
    
    def __init__(self):
        """Bootstrap globals, utilities, db, infra and start the scheduler."""
        try:
            self.Global = Global()
            self.Utility = Utility()
            self.InfraUtil = InfraUtility()
            self.db = DBMySql('Scheduler')

            self.myModulePyFile = os.path.abspath(__file__)
            self.myClass = self.__class__.__name__

            #Setting the infrastructure
            self.Infra = self.InfraUtil.setInfra(self.Global.SchedulerInfraKey)
            if not self.Infra:
                raise InfraInitializationError('Could not initialize {cls}'.format(cls=(self.myModulePyFile,self.myClass)))

            # we need to get the proper logger for a given module
            self.logger = self.Infra.getInfraLogger(self.Global.SchedulerInfraKey)

            # loading Scheduler config and starting scheduler
            self.__startScheduler__()

        except Exception as err:
            # NOTE(review): this try/except only re-raises the same exception;
            # a bare `raise` (or no handler at all) would be equivalent.
            raise err

    def __startScheduler__(self):
        """Create a Background/Blocking scheduler per global config and start it.

        Mode 'Run' starts active; any other mode starts the scheduler paused.
        """

        try:
            mySchedulerType = self.Global.DefaultSchedulerType
            mySchedulerMode = self.Global.DefaultSchedulerMode

            if mySchedulerMode == 'Run':
                myArgPaused = False
            else:
                myArgPaused = True
            #fi

            # NOTE(review): schedulerConfigData is passed positionally as the
            # scheduler's global config dict (gconfig) -- confirm its keys use
            # the 'apscheduler.*' option format that gconfig expects.
            mySchedulerConfig = self.Utility.getACopy(self.Infra.schedulerConfigData)

            if mySchedulerType == 'Background':
                self.Scheduler = BackgroundScheduler(mySchedulerConfig)
            else:
                self.Scheduler = BlockingScheduler(mySchedulerConfig)
            #fi

            if not self.Scheduler.running:
                self.Scheduler.start(paused = myArgPaused)

        except Exception as err:
            # NOTE(review): only re-raises; adds nothing over letting it propagate.
            raise err

    def getAllJobDetail(self):
        '''
        Description: Returns the detail (state dict) of every scheduled job
        '''
        return [self.getAJobDetail(scheduled.id)
                for scheduled in self.Scheduler.get_jobs()]

    def getAJobDetail(self, jobIdArg):
        '''
        Description: Returns the scheduler's state dict for one job id
        '''
        return self.Scheduler.get_job(jobIdArg).__getstate__()

    def suspendJob(self, jobIdArg):
        '''Pause the scheduler job identified by jobIdArg.'''
        self.Scheduler.get_job(jobIdArg).pause()

    def resumeJob(self, jobIdArg):
        '''Resume the (paused) scheduler job identified by jobIdArg.'''
        self.Scheduler.get_job(jobIdArg).resume()

    def getCurrentlyExecutingJob(self):
        # NOTE(review): despite the name, this returns the count of ALL
        # scheduled jobs, not only the currently executing ones.
        return len(self.Scheduler.get_jobs())

    def removeJob(self, jobId):
        """Remove a job by id; print a notice if the id is unknown."""
        try:
            self.Scheduler.remove_job(jobId)
        except JobLookupError:
            # FIX: dropped the unused `as err` binding.
            print('Invalid Job !!')

    def removeAllJobs(self):
        """Remove every job from the scheduler.

        FIX: the original wrapped the call in try/except only to re-raise the
        same exception (`raise err`); letting it propagate is equivalent.
        """
        self.Scheduler.remove_all_jobs()

    def getAllJobsFromRep(self):
        '''Print each scheduled job alongside its by-id scheduler lookup.'''
        for current in self.Scheduler.get_jobs():
            detail = self.Scheduler.get_job(current.id)
            print(current, detail)

    def getNewJob(self,prefixArg):
        """Build a unique job id: timestamp plus a 2-digit random suffix.

        If prefixArg is a string it is prepended as '<prefix>_'.
        """
        # FIX: randrange(10, 99) tops out at 98; use 100 so the suffix truly
        # spans 10-99 as the original comment intended (always two digits).
        # NOTE(review): '%-H' (unpadded hour) is a glibc strftime extension --
        # confirm availability on the target platform.
        suffix = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%-H%M%S_') + str(random.randrange(10, 100))
        # FIX: the original also tested `prefixArg is not None`, which is
        # redundant after isinstance(prefixArg, str); the duplicated
        # timestamp expression is now computed once.
        if isinstance(prefixArg, str):
            return prefixArg + '_' + suffix
        return suffix

    def getJobInfoFromDb(self, jobIdArg):
        """Fetch the ScheduledJobs row for the given job id.

        Returns the db response on success, or a response template marked
        unsuccessful on error.
        """
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.processDbRequest(operation = self.Global.fetch, container = 'ScheduledJobs', contents = ['*'], criteria = myJobCriteria)

        except Exception as err:
            # NOTE(review): if getResponseTemplate() itself raised, myResponse
            # is unbound here and this handler would raise NameError.
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            return myResponse

    def getNextSeqForJob(self, jobIdArg):
        """Return the next run-log sequence number (current row count + 1).

        NOTE(review): on error this returns an error-message string instead of
        an int -- callers must cope with the mixed return type.
        """
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria) + 1

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getCurrentSeqForJob(self, jobIdArg):
        """Return the current run-log sequence number (row count) for a job.

        NOTE(review): on error this returns an error-message string instead of
        an int -- callers must cope with the mixed return type.
        """
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getElapsedStatsForJob(self, jobIdArg):
        """Intended to return elapsed-time stats for a job.

        NOTE(review): the body is byte-identical to getCurrentSeqForJob -- it
        returns the run-log row count, not elapsed statistics. This looks like
        a copy-paste bug; confirm intended behavior before relying on it.
        """
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def processJobStartEvent(self, jobIdArg):
        '''
        1. Mark job started in ScheduledJobs
        2. Create new entry for this job in ScheduledJobsRunLog

        Returns a response dict; on success its data carries the new run 'Seq'.
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))

            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']

            if myJobDetailsFromDb:

                # building data for SchedulerJobsRunLog
                myJobCriteria = ' JobId = %s' %repr(myJobId)
                myNextSeqForJob = self.getNextSeqForJob(myJobId)

                # will mark the job started and creat the run log for this run
                self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={'Status': 'Executing'}, criteria = myJobCriteria, commitWork=True )
                
                # creating run information
                self.db.processDbRequest(operation='create', container='ScheduledJobsRunLog', \
                        dataDict={'JobId':myJobId, 'Seq' : myNextSeqForJob,  'ExecutionStarted': self.Utility.getCurrentTime()}, commitWork=True )

                self.Utility.buildResponse(myResponse, self.Global.Success, self.Global.Success, {'Seq':myNextSeqForJob})
            else:
                self.Utility.buildResponse(myResponse, self.Global.UnSuccess, 'Cound not find job details for job {job}'.format(job = myJobId))

            return myResponse

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            #raise err # will raise the error so this can be logged by scheduler as an error occurred in processing job
            return myResponse

    def processJobFinishEvent(self, jobIdArg, execDetailsArg):
        '''
        Process the "job finished" event for a scheduled job.

        1. Update this run's record in the ScheduledJobsRunLog container
           (status, elapsed seconds, completion time, raw execution detail).
        2. Update the job's aggregate statistics in ScheduledJobs (status,
           last run status, total run count, next run time, elapsed
           min/max/avg, consecutive-failure count reset to 0).

        Arguments:
            jobIdArg        : id of the job that finished
            execDetailsArg  : execution result dict; must provide 'Status' and
                              'Data' containing 'ElapsedSecs'

        Returns a response dict built via Utility.buildResponse: Success when
        the statistics were updated, UnSuccess when the job could not be
        found or an exception occurred.
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            myExecDetails = execDetailsArg
            myJobStatus = self.Global.NextJobRun

            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))

            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']

            if myJobDetailsFromDb:

                self.logger.debug('Job details found, proceeding with finish event')
                myJobCriteria = 'JobId = %s' %repr(myJobId)
                myCurrentSeqForJob = self.getCurrentSeqForJob(myJobId)
                myJobRunCriteria = ' JobId = %s and Seq = %s ' %(repr(myJobId), myCurrentSeqForJob)

                self.logger.debug('Job criteria {criteria}'.format(criteria = myJobCriteria))
                self.logger.debug('Job criteria with seq {criteria}'.format(criteria = myJobRunCriteria))

                myJobDetailsFromSched = self.getAJobDetail(myJobId)

                # Updating execution details of this run in ScheduledJobsRunLog
                self.logger.debug('updating statistics of this run')

                myDbResult = self.db.processDbRequest(operation = 'change', container = 'ScheduledJobsRunLog', \
                    dataDict={
                        'Status': myExecDetails['Status'], 'ElapsedSeconds':myExecDetails['Data']['ElapsedSecs'],
                        'ExecutionCompleted': self.Utility.getCurrentTime(), 'ExecutionDetail': json.dumps(myExecDetails['Data'])
                    }, criteria = myJobRunCriteria, commitWork=True )

                self.logger.debug('ScheduledJobsRunLog: db results >> {results}'.format(results = myDbResult))

                # Aggregate elapsed-time stats across all runs in the run log
                myElapsedStats = self.db.executeDynamicSql(\
                    operation = 'fetch', \
                    sql_text = 'select min(ElapsedSeconds) "Min", max(ElapsedSeconds) "Max", avg(ElapsedSeconds) "Avg" from ScheduledJobsRunLog')

                self.logger.debug('Elapsed Stats: {stats}'.format(stats = myElapsedStats))

                # BUG FIX: strftime format had a stray '%' ('%Y-%m-%d% %H:%M:%S')
                # BUG FIX: 'MaxElapsedSecs' was populated from the 'Min' column
                myDbResult = self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={
                        'Status': myJobStatus, 'LastRunStatus': myExecDetails['Status'], 'TotalRun' : myJobDetailsFromDb[0]['TotalRun'] + 1,
                        'NextRun' : myJobDetailsFromSched['next_run_time'].strftime('%Y-%m-%d %H:%M:%S'), 'LatConsecFailCnt' : 0,
                        'MinElapsedSecs' : myElapsedStats['Data'][0]['Min'], 'MaxElapsedSecs' : myElapsedStats['Data'][0]['Max'],
                        'AvgElapsedSecs' : myElapsedStats['Data'][0]['Avg']
                    }, criteria = myJobCriteria, commitWork=True )

                self.logger.debug('ScheduledJobs: last stats update >> {result}'.format(result = myDbResult))

                # NOTE(review): failure handling (consecutive-failure counting and
                # job suspension) was present only as a commented-out draft here;
                # it has been removed as dead code — revive it from history if needed.
                self.Utility.buildResponse(myResponse, self.Global.Success, self.Global.Success)
            else:
                # Consistent with the start-event handler: report UnSuccess when
                # the job cannot be found instead of a false Success.
                self.Utility.buildResponse(myResponse, self.Global.UnSuccess,
                    'Could not find job details for job {job}'.format(job = myJobId))
            return myResponse
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess, myErrorMsg)
            return myResponse
def test_EndpointStatuses(mocker):
    """Exercise the main EndpointStatuses workflow without running any jobs.

    gordo_components.watchman.endpoints_status.watch_for_model_server_service
    (the kubernetes event listener) is mocked away, and events are fed to the
    captured handler by hand.  The scheduler is never started, so scheduled
    jobs are only checked for presence/absence, never executed.
    """

    # Never .start()ed -> none of the scheduled jobs actually run.
    job_scheduler = BlockingScheduler()
    models = ["target 1", "target 2"]

    watch_mock = mocker.patch(
        "gordo_components.watchman.endpoints_status.watch_for_model_server_service"
    )
    eps = EndpointStatuses(
        scheduler=job_scheduler,
        project_name="super_project",
        ambassador_host="localhost",
        model_names=models,
        project_version="101",
        namespace="somenamespace",
    )

    watch_kwargs = watch_mock.call_args[1]
    assert watch_kwargs["namespace"] == "somenamespace"
    assert watch_kwargs["project_name"] == "super_project"
    assert watch_kwargs["project_version"] == "101"
    event_handler = watch_kwargs["event_handler"]

    # Before any events arrive, only the configured targets are listed...
    initial_status = eps.statuses()
    assert {ep["target"] for ep in initial_status} == set(models)
    # ...and none of them are healthy.
    assert all(ep["healthy"] is False for ep in initial_status)

    def send_model_event(event_type, model_name):
        """Fabricate a kubernetes event for *model_name* and deliver it."""
        event_obj = MagicMock()
        event_obj.metadata = MagicMock()
        event_obj.metadata.labels = {
            "applications.gordo.equinor.com/model-name": model_name
        }
        event_handler({"type": event_type, "object": event_obj})

    # Target 1 comes online: its metadata-update job is scheduled.
    send_model_event("ADDED", "target 1")
    assert len(job_scheduler.get_jobs()) == 1
    assert job_scheduler.get_job("update_model_metadata_target 1") is not None

    # Target 2 comes online as well.
    send_model_event("ADDED", "target 2")
    assert len(job_scheduler.get_jobs()) == 2
    assert job_scheduler.get_job("update_model_metadata_target 2") is not None

    # Target 1 disappears: its job is removed, target 2's job remains.
    send_model_event("DELETED", "target 1")
    assert len(job_scheduler.get_jobs()) == 1
    assert job_scheduler.get_job("update_model_metadata_target 1") is None
    assert job_scheduler.get_job("update_model_metadata_target 2") is not None
Example #15
0
def main():
    """Entry point for the billing collection scheduler.

    Builds a BlockingScheduler with thread/process executors, persists jobs
    in a SQLAlchemy job store, registers one hourly cron collector per
    resource type plus a 2-minute data-send job, then blocks in start().
    Jobs already present in the persistent job store are not re-added.
    """
    config.parse_args()
    executors = {
        'default': ThreadPoolExecutor(10),
        'processpool': ProcessPoolExecutor(3)
    }
    job_defaults = {
        'coalesce': True,            # collapse missed runs into a single run
        'max_instances': 2,
        'misfire_grace_time': 3600   # still fire if up to an hour late
    }
    scheduler = BlockingScheduler(executors=executors,
                                  job_defaults=job_defaults,
                                  timezone="UTC")
    scheduler.add_jobstore('sqlalchemy', url=CONF.database.connection)
    # BUG FIX: was a Python 2 print statement ("print CONF..."), which is a
    # SyntaxError under Python 3.
    print(CONF.database.connection)
    scheduler.add_job(tick, 'interval', seconds=10, id="abcdefg")

    # One hourly collector per resource type; skip any job already persisted
    # in the job store so restarts do not duplicate jobs.
    hourly_collectors = [
        (collectInstance, "get_instance_by_hour"),
        (collectDisk, "get_disk_by_hour"),
        (collectSnapshot, "get_snapshot_by_hour"),
        (collectRouter, "get_router_by_hour"),
        (collectIp, "get_ip_by_hour"),
        (collectImage, "get_image_by_hour"),
        (collectVpn, "get_vpn_by_hour"),
    ]
    for collector, job_id in hourly_collectors:
        if scheduler.get_job(job_id) is None:
            scheduler.add_job(collector, 'cron', hour='*', id=job_id)

    if scheduler.get_job("send_data_msg") is None:
        scheduler.add_job(send_data_msg, 'cron', minute='*/2',
                          id="send_data_msg")

    scheduler.start()
Example #16
0
class ColorScheduler:
    """Bridge from Spotify playback state to MQTT (Homie convention).

    Polls the Spotify API every 5 seconds via an APScheduler job and
    publishes the currently playing track title, a playing flag, the
    dominant album-cover color and an album-cover color palette as Homie
    properties.  A one-shot "date" job resets the color state at the
    estimated end of the current track.
    """

    # Populated by init_mqtt() / init_homie_device().
    mqttc: mqtt.Client
    homie_device: HomieDevice

    def __init__(self):
        # BlockingScheduler: start() (see below) blocks the calling thread.
        self.scheduler = BlockingScheduler()
        # self.color_finder = ColorFinder(color_filter_hue)
        self.color_finder = ColorFinder(color_filter_hue_brightness)
        # Spotify track id last published; None until a track is seen.
        self.current_track = None

        self.init_mqtt()
        self.init_homie_device()

        # Poll Spotify every 5 seconds.
        self.scheduler.add_job(self.update_job,
                               "interval", (),
                               id="job_updater",
                               seconds=5)

    def on_connect(self, client, userdata, flags, rc, properties=None):
        """MQTT on_connect callback: (re)publish the Homie device config."""
        self.homie_device.publish_config()

    def init_mqtt(self):
        """Create the MQTT client, authenticate if configured, and connect."""
        self.mqttc = mqtt.Client()

        if Config.MQTT_USER is not None:
            self.mqttc.username_pw_set(Config.MQTT_USER, Config.MQTT_PASSWORD)

        self.mqttc.on_connect = self.on_connect

        self.mqttc.connect(Config.MQTT_HOST)
        # Run the MQTT network loop in a background thread.
        self.mqttc.loop_start()

    def init_homie_device(self):
        """Declare the Homie device, its "player" node and four properties.

        Properties: is-playing (bool), track (string), dominant-album-color
        (rgb color) and album-cover-palette (string holding a JSON list).
        """
        homie_device = HomieDevice("spotibridge", self.mqttc)
        homie_device.name = "Spotibridge"
        homie_device.implementation = "SpotiBridge"
        homie_device.version = Version("4.0.0")
        homie_device.extensions = set()

        node = HomieNode("player", homie_device, True)
        node.name = "Player"
        node.type = "player"

        is_playing_property = HomieProperty("is-playing", node, True)
        is_playing_property.name = "Is Playing"
        is_playing_property.datatype = HomieDataType.BOOLEAN
        is_playing_property.value = False

        current_track_property = HomieProperty("track", node, True)
        current_track_property.name = "Track"
        current_track_property.datatype = HomieDataType.STRING
        current_track_property.value = ""

        dominant_album_color_property = HomieProperty("dominant-album-color",
                                                      node, True)
        dominant_album_color_property.name = "Dominant album color"
        dominant_album_color_property.datatype = HomieDataType.COLOR
        dominant_album_color_property.format = "rgb"
        dominant_album_color_property.value = (0, 0, 0)

        album_cover_palette_property = HomieProperty("album-cover-palette",
                                                     node, True)
        album_cover_palette_property.name = "Album cover color palette"
        album_cover_palette_property.datatype = HomieDataType.STRING
        album_cover_palette_property.value = "[]"

        self.homie_device = homie_device

    def start(self):
        """Start the scheduler; blocks until the scheduler is shut down."""
        self.scheduler.start()

    def get_color_palette(self, image: Image) -> List[Tuple[int, int, int]]:
        """Return a color palette extracted from *image* via ColorThief."""
        color_thief = ColorThief(image)
        palette = color_thief.get_palette(5, 1)
        return palette

    def set_color_palette(self, palette: List[Tuple[int, int, int]]):
        """Publish *palette* (as JSON) on album-cover-palette if it changed."""
        color_palette_property = self.homie_device.nodes["player"].properties[
            "album-cover-palette"]
        current_value = color_palette_property.value
        color_palette_property.value = json.dumps(palette)

        # Only publish when the serialized value actually changed.
        if current_value != color_palette_property.value:
            color_palette_property.publish_value()

    def update_job(self) -> None:
        """Poll Spotify and sync playback state to the Homie properties.

        Runs every 5 seconds.  When nothing is playing, removes the pending
        "color_updater" job (if any) and resets color/track/playing state.
        When a new track is detected, fetches the album cover, publishes its
        dominant color and palette, and schedules a one-shot reset at the
        estimated end of the track.
        """
        def update_color_caller():
            # One-shot job body: clear color state at the track's end.
            self.set_color((0, 0, 0))
            self.set_color_palette([])

        token = util.prompt_for_user_token(
            Config.SPOTIFY_USERNAME,
            "user-read-playback-state",
            client_id=Config.SPOTIFY_CLIENT_ID,
            client_secret=Config.SPOTIFY_CLIENT_SECRET,
            redirect_uri=Config.SPOTIFY_REDIRECT_URI,
        )

        sp = Spotify(auth=token)

        current_track = sp.current_user_playing_track()

        if (current_track is None or not current_track["is_playing"]
                or current_track["item"] is None):
            # Nothing playing: cancel any pending end-of-track reset.
            job = self.scheduler.get_job("color_updater")

            if job is not None:
                job.remove()

            # Reset published state once (only if we had published something).
            if job is not None or self.current_track is not None:
                self.set_color((0, 0, 0))
                self.set_color_palette([])
                self.set_is_playing(False)
                self.set_current_track_title("")

            return

        track_id = current_track["item"]["id"]

        if self.current_track is None:
            self.set_is_playing(True)

        # New track: fetch the album cover and publish its colors/title.
        if self.current_track != track_id:
            self.current_track = track_id

            cover_urls = current_track["item"]["album"]["images"]
            cover_url = cover_urls[0]["url"]

            response = requests.get(cover_url)
            response.raise_for_status()
            image = Image.open(BytesIO(response.content))

            self.set_color(self.color_finder.get_most_prominent_color(image))
            self.set_color_palette(self.get_color_palette(image))
            self.set_current_track_title(current_track["item"]["name"])

        # One cannot use this as this is not correct
        # now = datetime.fromtimestamp(current_track['timestamp'] / 1000)

        now = datetime.now()

        # Estimated track end = now - elapsed progress + total duration.
        start_of_track = now - timedelta(
            milliseconds=current_track["progress_ms"])
        next_change = start_of_track + timedelta(
            milliseconds=current_track["item"]["duration_ms"])

        self.scheduler.add_job(
            update_color_caller,
            "date",
            (),
            id="color_updater",
            run_date=next_change,
            replace_existing=True,
        )

    def set_color(self, color: Tuple[int, int, int]) -> None:
        """Publish *color* on dominant-album-color if it changed."""
        color_property = self.homie_device.nodes["player"].properties[
            "dominant-album-color"]

        if color_property.value != color:
            color_property.value = color
            color_property.publish_value()

    def set_is_playing(self, is_playing: bool) -> None:
        """Publish *is_playing* on the is-playing property if it changed."""
        is_playing_property = self.homie_device.nodes["player"].properties[
            "is-playing"]

        if is_playing_property.value != is_playing:
            is_playing_property.value = is_playing
            is_playing_property.publish_value()

    def set_current_track_title(self, current_track_title: str) -> None:
        """Publish *current_track_title* on the track property if it changed."""
        current_track_property = self.homie_device.nodes["player"].properties[
            "track"]

        if current_track_property.value != current_track_title:
            current_track_property.value = current_track_title
            current_track_property.publish_value()