Example #1
def beginJobService(rHelper):
    logger.info("Starting up")

    if rHelper.debug:
        dbName = 'jobs.sqlite'
    else:
        dbName = os.path.join(prodconf.DIR_PATH, "jobs.sqlite")

    logger.debug("DBPATH: %s" % dbName)

    jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///%s' % dbName)}
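    # SQLAlchemyJobStore persists the jobs in the SQLite file above, so they survive restarts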

    firstTrigger = CronTrigger(hour=22, minute=5)
    secondTrigger = CronTrigger(hour=10, minute=5)

    sched = BlockingScheduler(jobstores=jobstores)
    sched.add_job(rHelper.postDaily, trigger=firstTrigger)
    sched.add_job(rHelper.postDaily, trigger=secondTrigger)

    for w in weeklies:
        sched.add_job(rHelper.postWeekly, trigger=w.trigger, args=[w])

    try:
        logger.info("Starting blocking scheduler")
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        logger.info("Scheduler interrupted")
    finally:
        logger.info("Removing all jobs")
        sched.remove_all_jobs()
Example #2
class ProxyScheduler(object):

    # @my_log(log_name='schedule.log')
    def __init__(self, refresh_freq):
        self.manager = ProxyManager()
        self.scheduler = BlockingScheduler()
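        # the lock keeps refresh_proxy_pool and clear_proxy_pool from running at the same time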
        self.lock = threading.Lock()
        refresh_run_time = datetime.datetime.now() + datetime.timedelta(
            minutes=1)
        clear_run_time = datetime.datetime.now() + datetime.timedelta(hours=12)
        # refresh the proxy pool every refresh_freq hours; every 24 hours clear the valid proxies and re-validate them
        self.scheduler.add_job(self.refresh_proxy_pool,
                               'interval',
                               hours=refresh_freq,
                               next_run_time=refresh_run_time)
        self.scheduler.add_job(self.clear_proxy_pool,
                               'interval',
                               hours=24,
                               next_run_time=clear_run_time)

    # @my_log(log_name='schedule.log')
    def refresh_proxy_pool(self):
        with self.lock:
            print('Refresh proxy pool: start.')
            print(datetime.datetime.now())
            unchecked_list = self.manager.get_unchecked_proxy()
            self.manager.insert_proxy_list(ProxyManager.all_list, unchecked_list)
            valid_list = self.manager.check_proxies()
            self.manager.insert_proxy_list(ProxyManager.valid_list, valid_list)
            print('Refresh proxy pool: done.')

    # @my_log(log_name='schedule.log')
    def clear_proxy_pool(self):
        with self.lock:
            print('Clear proxy pool: start.')
            print(datetime.datetime.now())
            self.manager.clear_list(ProxyManager.all_list)
            overdue_list = self.manager.get_all_proxies()
            self.manager.insert_proxy_list(ProxyManager.all_list, overdue_list)
            self.manager.clear_list(ProxyManager.valid_list)
            print('Clear proxy pool: done.')

    # @my_log(log_name='schedule.log')
    def start_scheduler(self):
        self.scheduler.start()

    # @my_log(log_name='schedule.log')
    def __del__(self):
        self.scheduler.remove_all_jobs()
        print('ProxyScheduler: remove all jobs.')
Example #3
import threading
import importlib

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor

jobstores = {
    'mongo': MongoDBJobStore(),
    'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
}
executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
    'coalesce': False,
    'max_instances': 3
}
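# coalesce=False runs each missed execution separately; max_instances caps concurrent runs of a single job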
scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)

scheduler.remove_all_jobs()

# 'p' must already be bound to a callable (e.g. loaded via load() below) before this line runs
scheduler.add_job(p, 'cron', second='*/5', minute='*', hour='*', day='*', month='*', day_of_week='0-6', year='*')

# scheduler.reschedule_job('my_job_id', trigger='cron', minute='*/5')
print("befor")
t = threading.Thread(target=scheduler.start)
t.start()
print("after")

def load(packname):
    # dynamically import libs.apscheduler.dynamic_load.<packname> and schedule its run() callable
    module = importlib.import_module("libs.apscheduler.dynamic_load." + packname)
    p = module.run
    # p()  # optionally run the job once immediately
    scheduler.add_job(p, 'cron', second='*/5', minute='*', hour='*', day='*', month='*', day_of_week='0-6', year='*')
Example #4
from datetime import datetime, timedelta
import sys
import os

from apscheduler.schedulers.blocking import BlockingScheduler


def alarm(time):
    print('Alarm! This alarm was scheduled at %s.' % time)


if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_jobstore('mongodb', collection='example_jobs')
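    # jobs are persisted in MongoDB, so alarms from earlier runs survive unless --clear is passed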
    if len(sys.argv) > 1 and sys.argv[1] == '--clear':
        scheduler.remove_all_jobs()

    alarm_time = datetime.now() + timedelta(seconds=10)
    scheduler.add_job(alarm,
                      'date',
                      run_date=alarm_time,
                      args=[datetime.now()])
    print('To clear the alarms, run this example with the --clear argument.')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #5
class eagle_eye_bot(object):
    def __init__(self, stock_list=None):
        # avoid a mutable default argument: each instance gets its own list
        self._scheduler = BlockingScheduler()
        self.stock_list = stock_list if stock_list is not None else []
        self._money_flows = {}
        self._start_time = {}
        for stock in self.stock_list:
            self._money_flows[stock] = money_flow_level()
            self._start_time[stock] = '00:00:00'

    def start(self):
        self._scheduler.add_job(self._start_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=9,
                                minute=29,
                                second=58)  # start monitoring at 9:29:58 to capture the opening call-auction data first
        self._scheduler.add_job(self._end_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=11,
                                minute=30,
                                second=5)  # stop the morning session's monitoring at 11:30:05
        self._scheduler.add_job(self._start_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=13,
                                minute=0,
                                second=0)  # start the afternoon session's monitoring
        self._scheduler.add_job(self._end_monitor_job,
                                'cron',
                                day_of_week='mon-fri',
                                hour=15,
                                minute=0,
                                second=5)  # stop the afternoon session's monitoring at 15:00:05
        try:
            print('start to monitor tick data...')
            self._scheduler.start()  # blocks here until interrupted
        except (KeyboardInterrupt, SystemExit):
            self._scheduler.remove_all_jobs()

    def _start_monitor_job(self):
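        # toggled on by the cron jobs above: a 3-second interval job with id='monitor_tick' polls tick data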
        self._scheduler.add_job(self.__stock_monitor_on_second,
                                'interval',
                                seconds=3,
                                id='monitor_tick')

    def _end_monitor_job(self):
        self._scheduler.remove_job(job_id='monitor_tick')

    def add_stock(self, stock_code, levels=None):
        """
        Add a stock code to be monitored.
        :param stock_code: code of the stock to watch
        :param levels: optional money-flow level thresholds
        :return:
        """
        levels = fit_levels(levels if levels is not None else [])
        self.stock_list.append(stock_code)
        self._money_flows[stock_code] = money_flow_level(
            levels[0], levels[1], levels[2], levels[3])
        self._start_time[stock_code] = '00:00:00'

    def remove_stock(self, stock_code):
        """
        Stop monitoring a stock code.
        :param stock_code: code of the stock to remove
        :return:
        """
        self.stock_list.remove(stock_code)
        del self._money_flows[stock_code]
        del self._start_time[stock_code]

    def get_stock_money_flow(self, stock_code):
        """
        Return the accumulated money-flow data for a stock code.
        :param stock_code: code of the stock
        :return: the money_flow_level instance for the stock
        """
        return self._money_flows[stock_code]

    def __stock_monitor_on_second(self):  # fetch and process tick data on a timer
        data = ts.get_realtime_quotes(self.stock_list)
        for _, item in data.iterrows():
            if item['time'] != self._start_time[item['code']]:  # is this a newly traded tick?
                self.__add_tick(item)
                self._start_time[item['code']] = item['time']

    def __add_tick(self, tick_data):
        price = float(tick_data['price'])
        if price <= float(tick_data['ask']):  # at or below this quote: classify as seller-initiated
            tick_type = -1
        elif price >= float(tick_data['bid']):  # at or above this quote: classify as buyer-initiated
            tick_type = 1
        else:
            tick_type = 0
        self._money_flows[tick_data['code']].add_tick(
            float(tick_data['amount']), float(tick_data['volume']), tick_type)
Example #6
import os
import sys
from datetime import datetime, timedelta

from apscheduler.schedulers.blocking import BlockingScheduler

def alarm(time):
    print('Alarm! This alarm was scheduled at %s' % time)


if __name__ == '__main__':
    sched = BlockingScheduler()
    sched.add_jobstore('redis')
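    # jobs are persisted in Redis; pass --clear to drop alarms left over from earlier runs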
    if len(sys.argv) > 1 and sys.argv[1] == '--clear':
        sched.remove_all_jobs()
    alarm_time = datetime.now() + timedelta(seconds=1)
    sched.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()])
    sched.add_job(alarm, 'interval', seconds=1)
    print('To clear the alarms, run this example with the --clear argument.')
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #7
class Quant:
    def __init__(self):
        # log in to the baostock system at startup
        bao_stock.login()
        self.today = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        # job scheduler
        self.scheduler = BlockingScheduler()
        """ 
        持仓
        明细  证券代码  证券名称 股票余额 可用余额 冻结数量  盈亏   成本价 盈亏比(%)  市价  市值 市场代码  交易市场  股东帐户 实际数量 资讯 买入 冻结 卖出冻结 股份实时余额
        0     000859   国风塑业   100      100     0     -35.100  5.701  -6.16   5.350  535.000    1   深A  0272058802  100       0    0    100
        1     600313   农发种业   100      100     0     -1.110   2.711  -0.41   2.700  270.000    2   沪A  A570522883  100       0    0    100
        """
        self.positions = pd.DataFrame()
        """
        账户余额信息
        可用金额  总市值  总资产     资金余额     资金帐户
        1028.44  805     1833.44    1028.44     210400045648
        """
        self.sub_accounts = pd.DataFrame()
        """ 前期的数据,一般是前一天的 """
        self.before_data = pd.DataFrame()
        """ 止损,滑点等 """
        self.pos_amount = 4
        self.max_loss = -0.03
        self.slippage = 0.001
        self.commission = 0.0005

        self.wait_stock = []

    def job(self, msg=''):
        """
        Job dispatch:
        check whether today is a trading day; if so, set up and run the automated trading schedule.
        :return:
        """
        print('Tag:', msg)
        print('Checking trading day:', self.today)
        self.today = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        # self.today = '2019-11-29'
        trade_dates = bao_stock.get_trade_dates(start_date=self.today,
                                                end_date=self.today)
        if len(trade_dates) == 0 or trade_dates.at[0, 'is_trading_day'] == 0:
            print('Not a trading day:', self.today)
        else:
            # with open('../logo.txt', encoding='utf-8') as f:
            #     data = f.read()
            #     print(data)
            print('》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》》')

            self.before_open()
            print('Regular trading day:', self.today)
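            # a new BlockingScheduler is built for each trading day; select() removes all of its jobs at 17:35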
            self.scheduler = BlockingScheduler()
            """ 开盘前运行一次 """
            before_open_trigger = DateTrigger(
                run_date=f'{self.today} 09:15:00')
            self.scheduler.add_job(func=self.before_open,
                                   trigger=before_open_trigger,
                                   misfire_grace_time=1000,
                                   id='before_open_job')
            """ 开盘运行一次 """
            open_trigger = DateTrigger(run_date=f'{self.today} 09:30:00')
            self.scheduler.add_job(func=self.open,
                                   trigger=open_trigger,
                                   misfire_grace_time=1000,
                                   id='open_job')
            """ 止盈止损 """
            # 在 10:30至11:30期间,每隔1分 运行一次 stop_loss 方法
            am_stop_loss_trigger = IntervalTrigger(
                minutes=1,
                start_date=f'{self.today} 10:30:00',
                end_date=f'{self.today} 11:30:00')
            self.scheduler.add_job(func=self.stop_loss,
                                   trigger=am_stop_loss_trigger,
                                   misfire_grace_time=1000,
                                   id='am_stop_loss_job')
            # run the stop_loss method once every minute between 13:00 and 15:00
            pm_stop_loss_trigger = IntervalTrigger(
                minutes=1,
                start_date=f'{self.today} 13:00:00',
                end_date=f'{self.today} 15:00:00')
            self.scheduler.add_job(func=self.stop_loss,
                                   trigger=pm_stop_loss_trigger,
                                   misfire_grace_time=1000,
                                   id='pm_stop_loss_job')
            """ 收盘前30分钟,运行一次 """
            before_close_trigger = DateTrigger(
                run_date=f'{self.today} 14:30:00')
            self.scheduler.add_job(func=self.close,
                                   trigger=before_close_trigger,
                                   misfire_grace_time=1000,
                                   id='before_close_job')
            """ 收盘后10分钟,运行一次 """
            close_trigger = DateTrigger(run_date=f'{self.today} 15:10:00')
            self.scheduler.add_job(func=self.close,
                                   trigger=close_trigger,
                                   misfire_grace_time=1000,
                                   id='close_job')
            """ 17:35爬取最新数据,选股,运行一次 """
            select_trigger = DateTrigger(run_date=f'{self.today} 17:35:00')
            self.scheduler.add_job(func=self.select,
                                   trigger=select_trigger,
                                   misfire_grace_time=1000,
                                   id='select_job')
            # start the scheduled jobs
            self.scheduler.start()

    def before_open(self):
        """
        开盘前15分钟,可以选出今天要交易的股票
        :return:
        """
        print('即将开盘')
        # 查询资金股份
        positions = client.get_positions()
        print('查询资金股份', positions)
        # positions
        self.positions = positions['positions']
        # account balance info
        self.sub_accounts = positions['sub_accounts']
        # select the stocks to trade; this would be better done after the close
        if len(self.wait_stock) == 0:
            # if there is no pending stock yet, run the selection again
            self.select()
        # the call-auction data could be inspected here

    def open(self):
        print(f'{self.today} market open', datetime.datetime.now())

    def stop_loss(self):
        print('Stop-loss monitoring', datetime.datetime.now())
        for pos in self.positions:
            # check each holding's current price; sell if the stop-loss level is hit
            print(pos)
            pass
        # after trading, refresh the positions
        self.positions = []

    def before_close(self):
        """
        收盘前30分钟
        :return:
        """
        print('还有半小时收盘', self.today)

    def close(self):
        print('Market closed', datetime.datetime.now())
        # refresh the positions
        self.positions = []
        # tally the profit and loss
        # update each holding's target price; the stop-loss will track this price
        for pos in self.positions:
            print(pos)
            pass

    def select(self):
        print('Stock selection', self.today)
        # refresh the data and select the stocks to trade on the next day
        self.wait_stock = ['000001', '000004']
        # remove all scheduled jobs for this trading day
        print('Removing all scheduled jobs')
        self.scheduler.remove_all_jobs()

    def buy(self, code, price, amount):
        """
        买入
        :param code:买入的股票代码
        :return:
        """
        re = client.buy(client='title:网上股票交易系统5.0',
                        timeout=5,
                        symbol=code,
                        type='LIMIT',
                        priceType=0,
                        price=price,
                        amount=amount)
        print(re)
        pass

    def sell(self, code, price, amount):
        try:
            sell = client.sell(client='title:网上股票交易系统5.0',
                               timeout=5,
                               symbol=code,
                               type='LIMIT',
                               priceType=0,
                               price=price,
                               amount=amount)
            print(type(sell))
            print(sell)
        except requests.exceptions.HTTPError as e:
            print(type(e))
            print(11, e)

    def init(self):
        """
        总调度
        :return:
        """
        print('自动交易启动。。。')
        # 设置调度器,每天9:00运行一次
        run = BlockingScheduler()
        trigger = CronTrigger(day_of_week='*',
                              hour=8,
                              minute=30,
                              end_date='2100-01-01')
        # trigger = CronTrigger(day_of_week='*', hour='9-18', minute='0-59', second=30, end_date='2100-01-01')
        run.add_job(self.job,
                    trigger,
                    max_instances=10000,
                    misfire_grace_time=1000,
                    args=['scheduled run'])
        run.start()

    def run(self):
        """
        程序运行入口
        使用线程,让其先运行一次
        :return:
        """
        threading.Thread(target=self.init).start()
        threading.Thread(target=self.job, args=("先执行一次", )).start()
Example #8
    'Test'
    #print('-------------')
    #for i in ArrayCourse:
    #	print(i)
    print('-------------')
    print("课程:" + Class)
    print("教室:" + Location)
    print("时间:" + Time)
    print('-------------')
    # call the Aliyun interface
    print("---Calling the Aliyun interface---")
    AliSend.send(Class, Location, Time)


if __name__ == '__main__':
    try:
        print("---初始化定时任务列队---")
        scheduler = BlockingScheduler()
        print("注册任务:每天13点45分提醒")
        #每天13点10分提醒
        scheduler.add_job(main, 'cron', hour=13, minute=10)
        print("注册任务:每天22点25分提醒 提醒内容为明天早上")
        #每天22点25分提醒 提醒内容为明天早上
        scheduler.add_job(main, 'cron', hour=22, minute=40)
        print("执行中")
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print("\n---删除所有作业并退出---")
        scheduler.remove_all_jobs(jobstore=None)  #删除所有作业
Example #9
class ZhiHuiShuCourseWorkerBlocking:
    def __init__(self,
                 course_id: str,
                 hour=21,
                 minute=0,
                 second=0,
                 study_count: int = 3):
        self.course_recruit_id = course_id
        self.hour = hour
        self.minute = minute
        self.second = second

        self.study_count = study_count

        self.scheduler = BlockingScheduler()

    def job_start(self):
        logger.info('Starting to study')

    def before_qr(self):
        logger.info('Please scan the QR code')

    def after_qrcode(self):
        logger.info('QR code scan succeeded')

    def job_finish(self):
        logger.info('All lessons finished')

    def lesson_finish(self):
        logger.info('Finished this lesson')

    def course_start(self):
        logger.info('Starting this course')

    def course_end(self):
        logger.info('Finished this course')

    def get_zhs(self, cookies: str = None) -> ZhiHuiShu:

        zhs = ZhiHuiShu()
        if not cookies:
            zhs.login(True, self.before_qr, self.after_qrcode)
        else:
            zhs.set_cookies(cookies)
        return zhs

    def job(self, cookies: str = None):
        # sleep(random.randrange(0, 1000))

        self.job_start()

        flag = False

        while not flag:
            try:
                zhs = self.get_zhs(cookies)
            except RuntimeError as e:
                logger.exception(e)
                logger.info('job: retrying login')
            else:
                flag = True

        # noinspection PyUnboundLocalVariable
        vl = zhs.video_list(self.course_recruit_id)
        si = zhs.query_study_info(vl)

        sc = self.study_count

        flag = False
        for k, v in si['lv'].items():
            if v['watchState'] == 0 and 'learnTime' not in v:
                flag = True
                pln = zhs.pre_learning_note(int(k), vl)
                logger.info(f'Starting lesson: {int(k)}')
                zhs.start_watch_blocking(int(k), vl, si, pln)
                self.lesson_finish()
                sc -= 1
                if sc <= 0:
                    break
        if not flag:
            # no unwatched lessons are left; stop scheduling further runs
            self.scheduler.remove_all_jobs()
            self.scheduler.shutdown(wait=False)

        self.job_finish()
        zhs.close()

    def start(self):
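        # note: hour/minute/second values of 0 are passed as None below,
        # so a zero field falls back to the CronTrigger default rather than matching 0 exactly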
        self.scheduler.add_job(
            self.job, 'cron', **{
                'hour': self.hour if self.hour else None,
                'minute': self.minute if self.minute else None,
                'second': self.second if self.second else None
            })
        self.scheduler.start()

    def start_with_cookies(self, cookies: str):
        self.scheduler.add_job(
            partial(self.job, cookies), 'cron', **{
                'hour': self.hour if self.hour else None,
                'minute': self.minute if self.minute else None,
                'second': self.second if self.second else None
            })
        self.scheduler.start()
Example #10
class CustomScheduler():
    def __init__(self, bot: CustomBot, database: SchedulerDatabase):
        self.bot = bot
        self.database = database
        self.scheduler = BlockingScheduler()
        self.scheduler.add_jobstore(MemoryJobStore(), alias='scheduled')
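        # jobs built from the database go into the 'default' store; one-off delayed jobs go into the separate 'scheduled' store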
        self.logger = getLogger(__name__)
        self.actions = [
            ['send_and_delete_message', self.send_and_delete_message],
            ['open_link_with_delay', self.open_link_with_delay]
        ]

    def reload_database(self):
        self.scheduler.remove_all_jobs(jobstore='default')
        self.database.load_database()
        self.create_jobs()
        self.database.load_database()

    def disable_apscheduler_logger(self):
        """ Optional """
        getLogger('apscheduler.scheduler').setLevel(WARNING)
        getLogger('apscheduler.executors.default').setLevel(WARNING)

    def add_delay_job_run_once(self, func, delta, kwargs):
        run_date = datetime.now() + timedelta(**delta)
        self.scheduler.add_job(func=func, trigger="date",
                               run_date=run_date, kwargs=kwargs, jobstore='scheduled', misfire_grace_time=None)

    def send_message(self, payload):
        payload.get('message').update(
            text='\n'.join(payload.get('message').get('text')))
        return self.bot.send_message_thank(**payload.get('message'))

    def delete_message(self, payload, message_id):
        kwargs = {
            'func': self.bot.delete_message,
            'delta': payload.get('timedelta'),
            "kwargs": {
                'chat_id': payload.get('message').get('chat_id'),
                'message_id': message_id
            }
        }
        self.add_delay_job_run_once(**kwargs)

    def send_and_delete_message(self, payload: dict):
        message_id = self.send_message(payload)
        self.delete_message(payload, message_id)

    def open_link(self, payload: dict):
        kwargs = {
            'func': startfile,
            'delta': payload.get('timedelta'),
            'kwargs': {
                'filepath': payload.get('url')
            }
        }
        self.add_delay_job_run_once(**kwargs)

    def open_zoom_link(self, payload: dict):
        # TODO: parse the link and open it through the built-in app
        #  rather than through the browser, which is annoying
        self.open_link(payload)

    def open_link_with_delay(self, payload: dict):
        url = payload.get('url')
        if 'zoom.us' in url:
            self.open_zoom_link(payload)
        else:
            self.open_link(payload)

    def timeout_job_manager(self, payload: list):
        for income_action in payload:
            action = income_action.get('action')

            for action_name, func in self.actions:
                if action == action_name:
                    func(income_action)

    def create_job(self, job: dict):
        self.scheduler.add_job(func=self.timeout_job_manager,
                               kwargs={'payload': job.get('payload')}, jobstore='default', misfire_grace_time=None, **job.get('time'))

    def create_jobs(self):
        for job in self.database.get_all_jobs():
            self.logger.info(f'Processing job with id: {job.get("id")}')
            self.create_job(job)

    def start(self):
        self.logger.info('Custom Scheduler started')
        self.scheduler.start()

    def stop(self):
        self.logger.info('Custom Scheduler has been shut down')
        self.scheduler.shutdown()
Example #11
class SchedUtility(object, metaclass=Singleton):
    
    def __init__(self):
        try:
            self.Global = Global()
            self.Utility = Utility()
            self.InfraUtil = InfraUtility()
            self.db = DBMySql('Scheduler')

            self.myModulePyFile = os.path.abspath(__file__)
            self.myClass = self.__class__.__name__

            #Setting the infrastructure
            self.Infra = self.InfraUtil.setInfra(self.Global.SchedulerInfraKey)
            if not self.Infra:
                raise InfraInitializationError('Could not initialize {cls}'.format(cls=(self.myModulePyFile,self.myClass)))

            # we need to get the proper logger for a given module
            self.logger = self.Infra.getInfraLogger(self.Global.SchedulerInfraKey)

            # loading Scheduler config and starting scheduler
            self.__startScheduler__()

        except Exception as err:
            raise err

    def __startScheduler__(self):

        try:
            mySchedulerType = self.Global.DefaultSchedulerType
            mySchedulerMode = self.Global.DefaultSchedulerMode

            if mySchedulerMode == 'Run':
                myArgPaused = False
            else:
                myArgPaused = True
            #fi

            mySchedulerConfig = self.Utility.getACopy(self.Infra.schedulerConfigData)

            if mySchedulerType == 'Background':
                self.Scheduler = BackgroundScheduler(mySchedulerConfig)
            else:
                self.Scheduler = BlockingScheduler(mySchedulerConfig)
            #fi

            if not self.Scheduler.running:
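                # paused=True starts the scheduler without processing any jobs until resume() is called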
                self.Scheduler.start(paused = myArgPaused)

        except Exception as err:
            raise err

    def getAllJobDetail(self):
        '''
        Description: Returns all jobs as stored in scheduler
        '''
        myJobDetail = []
        
        for job in self.Scheduler.get_jobs():
            myJobDetail.append(self.getAJobDetail(job.id))

        return myJobDetail

    def getAJobDetail(self, jobIdArg):
        '''
        Description: Return the detail of a single job as stored in the scheduler
        '''
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        myJobDetail = job.__getstate__()

        return myJobDetail

    def suspendJob(self, jobIdArg):
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        job.pause()

    def resumeJob(self, jobIdArg):
        myJobId = jobIdArg
        job = self.Scheduler.get_job(myJobId)
        job.resume()

    def getCurrentlyExecutingJob(self):
        return len(self.Scheduler.get_jobs())

    def removeJob(self, jobId):
        try:
            self.Scheduler.remove_job(jobId)
        except JobLookupError as err:
            print('Invalid Job !!')

    def removeAllJobs(self):
        try:
            self.Scheduler.remove_all_jobs()
        except Exception as err:
            raise err

    def getAllJobsFromRep(self):
        for job in self.Scheduler.get_jobs():
            myJobDetail = self.Scheduler.get_job(job.id)    
            print(job,myJobDetail)

    def getNewJob(self,prefixArg):
        # random number between 10 and 99 to ensure we always get 2 digits
        if isinstance(prefixArg,str) and prefixArg is not None:
            return prefixArg + '_' + str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%-H%M%S_') + str(random.randrange(10,99)))
        else:
            return datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%-H%M%S_') + str(random.randrange(10,99))

    def getJobInfoFromDb(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.processDbRequest(operation = self.Global.fetch, container = 'ScheduledJobs', contents = ['*'], criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            return myResponse

    def getNextSeqForJob(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria) + 1

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getCurrentSeqForJob(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def getElapsedStatsForJob(self, jobIdArg):
        try:
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg = myJobId))

            myJobCriteria = 'JobId = %s ' %repr(myJobId)
            return self.db.getTotalRowCount(container = 'ScheduledJobsRunLog', criteria = myJobCriteria)

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            return myErrorMsg

    def processJobStartEvent(self, jobIdArg):
        '''
        1. Mark job started in ScheduledJobs
        2. Create new entry for this job in ScheduledJobsRunLog
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))

            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']

            if myJobDetailsFromDb:

                # building data for SchedulerJobsRunLog
                myJobCriteria = ' JobId = %s' %repr(myJobId)
                myNextSeqForJob = self.getNextSeqForJob(myJobId)

                # will mark the job as started and create the run log entry for this run
                self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={'Status': 'Executing'}, criteria = myJobCriteria, commitWork=True )
                
                # creating run information
                self.db.processDbRequest(operation='create', container='ScheduledJobsRunLog', \
                        dataDict={'JobId':myJobId, 'Seq' : myNextSeqForJob,  'ExecutionStarted': self.Utility.getCurrentTime()}, commitWork=True )

                self.Utility.buildResponse(myResponse, self.Global.Success, self.Global.Success, {'Seq':myNextSeqForJob})
            else:
                self.Utility.buildResponse(myResponse, self.Global.UnSuccess, 'Could not find job details for job {job}'.format(job = myJobId))

            return myResponse

        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess,myErrorMsg)
            #raise err # will raise the error so this can be logged by scheduler as an error occurred in processing job
            return myResponse

    def processJobFinishEvent(self, jobIdArg, execDetailsArg):
        '''
        1. Mark job completed (update failure count, total run count, consecutive failure count and last run status) in ScheduledJobs
        2. Update the ScheduledJobsRunLog container
        '''
        try:
            # initializing
            myResponse = self.Utility.getResponseTemplate()
            myJobId = self.Utility.getACopy(jobIdArg)
            myExecDetails = execDetailsArg
            myJobStatus = self.Global.NextJobRun
            
            self.logger.debug('arg [{arg}] received'.format(arg=myJobId))

            myJobDetailsFromDb = self.getJobInfoFromDb(myJobId)['Data']

            if myJobDetailsFromDb:

                self.logger.debug('Job details found, proceeding with finish event')
                myJobCriteria = 'JobId = %s' %repr(myJobId)
                myCurrentSeqForJob = self.getCurrentSeqForJob(myJobId)
                myJobRunCriteria = ' JobId = %s and Seq = %s ' %(repr(myJobId), myCurrentSeqForJob)

                self.logger.debug('Job criteria {criteria}'.format(criteria = myJobCriteria))
                self.logger.debug('Job criteria with seq {criteria}'.format(criteria = myJobRunCriteria))

                myJobDetailsFromSched = self.getAJobDetail(myJobId)

                # Updating execution details in ScheduledJobsRunLog
                self.logger.debug('updating statistics of this run')

                myDbResult = self.db.processDbRequest(operation = 'change', container = 'ScheduledJobsRunLog', \
                    dataDict={
                        'Status': myExecDetails['Status'], 'ElapsedSeconds':myExecDetails['Data']['ElapsedSecs'],
                        'ExecutionCompleted': self.Utility.getCurrentTime(), 'ExecutionDetail': json.dumps(myExecDetails['Data']) 
                    }, criteria = myJobRunCriteria, commitWork=True )

                self.logger.debug('ScheduledJobsRunLog: db results >> {results}'.format(results = myDbResult))

                # Updating execution details in ScheduledJobs
                #if myExecDetails['Status'] == self.Global.Success:
                    # if success, reset consecfailcnt to 0, increment totalrun by 1 and update next run
                myElapsedStats = self.db.executeDynamicSql(\
                    operation = 'fetch', \
                    sql_text = 'select min(ElapsedSeconds) "Min", max(ElapsedSeconds) "Max", avg(ElapsedSeconds) "Avg" from ScheduledJobsRunLog')

                self.logger.debug('Elapsed Stats: {stats}'.format(stats = myElapsedStats))

                myDbResult = self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                    dataDict={
                        'Status': myJobStatus, 'LastRunStatus': myExecDetails['Status'], 'TotalRun' : myJobDetailsFromDb[0]['TotalRun'] + 1,
                        'NextRun' : myJobDetailsFromSched['next_run_time'].strftime('%Y-%m-%d %H:%M:%S'), 'LatConsecFailCnt' : 0,
                        'MinElapsedSecs' : myElapsedStats['Data'][0]['Min'], 'MaxElapsedSecs' : myElapsedStats['Data'][0]['Max'],
                        'AvgElapsedSecs' : myElapsedStats['Data'][0]['Avg']
                    }, criteria = myJobCriteria, commitWork=True )

                self.logger.debug('ScheduledJobs: last stats update >> {result}'.format(result = myDbResult))

                #self.Utility.buildResponse(myResponse, self.Global.Success,self.Global.Success)
                '''
                else:
                    # process job was unsuccessful
                    if myJobDetailsFromDb[0]['LatConsecFailCnt'] >= self.Global.SchedConsecFailCntThreshold:
                        myJobStatus = self.Global.SuspendMode
                        self.logger.info('suspending job {job}'.format(job=myJobId))
                        self.suspendJob(myJobId)

                    myDbResult = self.db.processDbRequest(operation='change', container='ScheduledJobs', \
                        dataDict={
                            'Status': myJobStatus, 'LastRunStatus': myExecDetails['Status'], 'TotalRun' : myJobDetailsFromDb[0]['TotalRun'] + 1,
                            'next_run' : myJobDetailsFromSched['next_run_time'], 'LatConsecFailCnt' : myJobDetailsFromDb[0]['LatConsecFailCnt'] + 1, 
                            'TotalFailure' :  myJobDetailsFromDb[0]['TotalFailure' + 1]
                        }, criteria = myJobCriteria, commitWork=True )
                    # will suspend the job if total failure count has been reached beyond Total consecutive failure threshold
                    self.Utility.buildResponse(myResponse, self.Global.UnSuccess,self.Global.UnSuccess)
                    raise processJobError(myExecDetails['Message'])
                '''
            self.Utility.buildResponse(myResponse, self.Global.Success,self.Global.Success)
            return myResponse
        except Exception as err:
            myErrorMsg, myTraceback = self.Utility.getErrorTraceback()
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myErrorMsg)
            self.logger.error(self.Global.DefPrefix4Error * self.Global.DefPrefixCount , myTraceback)
            self.Utility.buildResponse(myResponse, self.Global.UnSuccess, myErrorMsg)
            return myResponse