Example #1
class HttpServer(flask.Flask):
    """Our HTTP/API server."""

    EXECUTORS = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }

    def __init__(self, name, ip, port, *args, **kwargs):
        """Constructor.

        Args:
            name:  (str) name of Flask service
            ip:  (str) IP address to bind HTTP server
            port:  (int) TCP port for HTTP server to listen
        """
        super(HttpServer, self).__init__(name, *args, **kwargs)
        # Fixup the root path for Flask so it can find templates/*
        root_path = os.path.abspath(os.path.dirname(__file__))
        logging.debug('Setting root_path for Flask: %s', root_path)
        self.root_path = root_path
        self.targets = config.CollectorConfig()
        self.ip = ip
        self.port = port
        self.start_time = time.time()
        self.setup_time = 0
        self.scheduler = BackgroundScheduler(daemon=True,
                                             executors=self.EXECUTORS)
        self.collection = None
        self.add_url_rule('/', 'index', self.index_handler)
        self.add_url_rule('/status', 'status', self.status_handler)
        self.add_url_rule('/latency', 'latency', self.latency_handler)
        self.add_url_rule('/influxdata', 'influxdata', self.influxdata_handler)
        self.add_url_rule('/quitquit', 'quitquit', self.shutdown_handler)
        logging.info('Starting Llama Collector, version %s', __version__)

    def configure(self, filepath):
        """Configure the Collector from file.

        Args:
            filepath: (str) where the configuration is located
        """
        self.targets.load(filepath)

    def status_handler(self):
        return flask.Response('ok', mimetype='text/plain')

    def index_handler(self):
        return flask.render_template(
            'index.html',
            targets=self.targets.targets,
            interval=self.interval,
            start_time=self.start_time,
            setup_time=self.setup_time,
            uptime=humanfriendly.format_timespan(time.time() -
                                                 self.start_time))

    def latency_handler(self):
        data = json.dumps(self.collection.stats, indent=4)
        return flask.Response(data, mimetype='application/json')

    def influxdata_handler(self):
        data = json.dumps(self.collection.stats_influx, indent=4)
        return flask.Response(data, mimetype='application/json')

    def shutdown_handler(self):
        """Shuts down the running web server and other things."""
        logging.warning('/quitquit request, attempting to shut down server...')
        self.scheduler.shutdown(wait=False)
        fn = flask.request.environ.get('werkzeug.server.shutdown')
        if not fn:
            raise Error('Werkzeug (Flask) server NOT running.')
        fn()
        return '<pre>Quitting...</pre>'

    def run(self,
            interval,
            count,
            use_udp=False,
            dst_port=util.DEFAULT_DST_PORT,
            timeout=util.DEFAULT_TIMEOUT,
            *args,
            **kwargs):
        """Start all the polling and run the HttpServer.

        Args:
            interval:  seconds between each poll
            count:  number of datagrams to send to each responder per interval
            use_udp:   utilize UDP probes for testing
            dst_port:  port to use for testing (only UDP)
            timeout:  how long to wait for probes to return
        """
        self.interval = interval
        self.scheduler.start()
        self.collection = Collection(self.targets, use_udp)
        self.scheduler.add_job(self.collection.collect,
                               'interval',
                               seconds=interval,
                               args=[count, dst_port, timeout])
        super(HttpServer, self).run(host=self.ip,
                                    port=self.port,
                                    threaded=True,
                                    *args,
                                    **kwargs)
        self.setup_time = round(time.time() - self.start_time, 0)
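The example above wires two named executors and an interval job into a BackgroundScheduler that lives inside a Flask app. As a point of comparison, here is a minimal standalone sketch of the same executor/add_job wiring with the Flask pieces stripped out (the collect function and the 30-second interval are illustrative, not from the Llama project):

import time

from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler


def collect():
    # Stand-in for Collection.collect(count, dst_port, timeout)
    print('collecting at', time.time())


scheduler = BackgroundScheduler(executors={
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5),
})
scheduler.add_job(collect, 'interval', seconds=30)
scheduler.start()

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    scheduler.shutdown(wait=False)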
Example #2
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.triggers.combining import AndTrigger, OrTrigger
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.cron import CronTrigger
from apscheduler.events import EVENT_ALL, EVENT_JOB_SUBMITTED, EVENT_JOB_ERROR
from multiprocessing import cpu_count
from dycounter.pools import mongopool
import dycounter.config as cf
import logging
import pytz

jobstores = {'default': MongoDBJobStore(host='localhost', port=27017)}

executors = {
    'default': ThreadPoolExecutor(cpu_count()),
    'processpool': ProcessPoolExecutor(cpu_count())
}

job_defaults = {'coalesce': False, 'max_instances': 3}

logger = logging.getLogger('apscheduler.executors.default')
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s  %(levelname)-10s %(module)-15s %(message)s')


class JobScheduler(object):
    def __init__(self, every=30, unit='second'):
        self.mongo = mongopool.get()
        self.cursor = self.mongo.get_database('apscheduler').get_collection(
Example #3
#     while True:
#         time.sleep(1)
#
#
# if __name__ == '__main__':
#     main()
import datetime as dt
import os
import time
from datetime import datetime

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(
    executors={"processpool": ThreadPoolExecutor(10)})


def tick():
    next_run_at = datetime.now() + dt.timedelta(seconds=3.0)
    print('Tick! The time is: %s, run next at:%s' %
          (datetime.now(), next_run_at))
    scheduler.add_job(tick,
                      next_run_time=next_run_at,
                      executor="processpool",
                      id="hello kitty")
    print("pid:%r" % os.getpid())


if __name__ == '__main__':
    next_run_at = datetime.now() + dt.timedelta(seconds=3.0)
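The snippet above re-adds the tick job from inside tick() under a fixed id, which can raise ConflictingIdError if the previous entry is still in the job store. A minimal sketch of the same self-rescheduling pattern, assuming replace_existing=True is an acceptable way to handle the conflict (the original does not show how it does):

import time
from datetime import datetime, timedelta

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(executors={'processpool': ThreadPoolExecutor(10)})


def tick():
    next_run_at = datetime.now() + timedelta(seconds=3)
    print('Tick! next run at', next_run_at)
    # replace_existing avoids ConflictingIdError if the old entry is still stored
    scheduler.add_job(tick, 'date', run_date=next_run_at,
                      executor='processpool', id='tick', replace_existing=True)


if __name__ == '__main__':
    scheduler.add_job(tick, 'date', run_date=datetime.now() + timedelta(seconds=3),
                      executor='processpool', id='tick')
    scheduler.start()
    while True:
        time.sleep(1)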
Example #4
File: modelrun.py  Project: CharsLeung/Calf
    def DScheduler(cls,
                   action,
                   start_date=None,
                   execute_date=None,
                   end_date=None,
                   execute_interval=3,
                   tz=None,
                   **kwargs):
        """
        一个依托于时间驱动的实时任务,action所挂载的任务由相应的时间驱动,
        这跟run方法由K线更新驱动不一样,时区功能未起作用
        :param action:
        :param start_date:like '09:30:00'
        :param execute_date:like '09:30:00-11:30:00' or '09:30:00-11:30:00 13:00:00-15:00:00'
        :param end_date:like '15:00:00'
        :param execute_interval:连续任务的执行时间间隔,以秒计
        :param tz:时区
        :return:
        """
        fmt = '%Y-%m-%d %H:%M:%S'
        if start_date is not None:
            try:
                sdt = dt.datetime.strptime('2000-01-01 ' + start_date, fmt)
            except Exception:
                raise TypeError(
                    'start_date must be a string like "09:30:00"')
        if execute_date is not None:
            try:
                xdt = []
                dts = execute_date.split(' ')
                for et in dts:
                    t = et.split('-')
                    s = dt.datetime.strptime('2000-01-01 ' + t[0], fmt)
                    e = dt.datetime.strptime('2000-01-01 ' + t[1], fmt)
                    # if s > e:
                    # If the execute start is later than the end, the window runs from today into the next day
                    # raise TypeError('execute start datetime must less than end')
                    xdt.append([s, e])
                    del s, e, t
                del dts
            except Exception:
                raise TypeError(
                    'execute_date must be a string like "09:30:00-11:30:00" or'
                    ' "09:30:00-11:30:00 13:00:00-15:00:00"')
        if end_date is not None:
            try:
                edt = dt.datetime.strptime('2000-01-01 ' + end_date, fmt)
            except Exception:
                raise TypeError('end_date must be a string like "15:30:00"')
        if tz is not None:
            if tz not in pytz.all_timezones:
                raise ValueError(
                    'Only timezones from the pytz library are supported')
            else:
                tz = pytz.timezone(tz)
        # from pytz import FixedOffset, utc
        from apscheduler.triggers.date import DateTrigger
        from apscheduler.triggers.interval import IntervalTrigger
        from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor

        calibration()

        while 1:
            # scheduler = BlockingScheduler(daemonic=False)
            # crt = CalfDateTime.now(tz, offset)
            crt = dt.datetime.now() if tz is None else dt.datetime.now(
                tz=tz).replace(tzinfo=None)
            tdy = dt.datetime(crt.year, crt.month, crt.day)
            # Non-trading day
            if not action.is_trade_day(tdy):
                print(fontcolor.F_RED + '-' * 80)
                print('Note:Non-transaction date;Datetime:' + str(crt))
                print('-' * 80 + fontcolor.END)
                delta = (tdy + dt.timedelta(days=1) - crt).seconds
                delta = 1 if delta < 1 else delta
                time.sleep(delta)  # sleep to next day
                continue
            # Trading day
            else:
                try:
                    nsds = list()
                    executors = {
                        'default': ThreadPoolExecutor(4),
                        'processpool': ProcessPoolExecutor(4)
                    }
                    job_defaults = {'coalesce': True, 'max_instances': 1}
                    scheduler = BackgroundScheduler(executors=executors,
                                                    job_defaults=job_defaults,
                                                    daemonic=False,
                                                    timezone=tz)
                    if start_date is not None:
                        d = tdy + dt.timedelta(hours=sdt.hour,
                                               minutes=sdt.minute,
                                               seconds=sdt.second)
                        nsds.append(d + dt.timedelta(days=1))

                        def action_start(args):
                            print(fontcolor.F_GREEN + '-' * 80)
                            print('Calf-Note:start task running on ',
                                  dt.datetime.now(tz=tz))
                            print('-' * 80 + fontcolor.END)
                            try:

                                def start(**args):
                                    action.start(**args)

                                t = threading.Thread(target=start, kwargs=args)
                                t.start()
                            except Exception as ep:
                                ExceptionInfo(ep)

                        scheduler.add_job(func=action_start,
                                          trigger=DateTrigger(d),
                                          id='action_start',
                                          args=[kwargs])
                    if execute_date is not None:

                        def action_execute(args):
                            print(fontcolor.F_GREEN + '-' * 80)
                            print('Calf-Note:execute task running on ',
                                  dt.datetime.now(tz=tz))
                            print('-' * 80 + fontcolor.END)
                            try:

                                def exe(**args):
                                    action.execute(**args)

                                t = threading.Thread(target=exe, kwargs=args)
                                t.start()
                                t.join(execute_interval - 1)
                                # action.execute(args=args)
                            except Exception as ep:
                                ExceptionInfo(ep)

                        for x in xdt:
                            sd = tdy + dt.timedelta(hours=x[0].hour,
                                                    minutes=x[0].minute,
                                                    seconds=x[0].second)
                            ed = tdy + dt.timedelta(hours=x[1].hour,
                                                    minutes=x[1].minute,
                                                    seconds=x[1].second)
                            if sd > ed:
                                # A window like '21:30:00-04:00:00' means the task should run
                                # from 21:30 today until 04:00 the next day.
                                ed = ed + dt.timedelta(days=1)
                            else:
                                pass
                            scheduler.add_job(func=action_execute,
                                              trigger=IntervalTrigger(
                                                  seconds=execute_interval,
                                                  start_date=sd,
                                                  end_date=ed),
                                              args=[kwargs])
                            nsds.append(sd + dt.timedelta(days=1))

                    if end_date is not None:

                        def action_end(args):
                            print(fontcolor.F_GREEN + '-' * 80)
                            print('Calf-Note:end task running on ',
                                  dt.datetime.now(tz=tz))
                            print('-' * 80 + fontcolor.END)
                            try:

                                def end(**args):
                                    action.end(**args)

                                t = threading.Thread(target=end, kwargs=args)
                                t.start()
                            except Exception as ep:
                                ExceptionInfo(ep)

                        d = tdy + dt.timedelta(hours=edt.hour,
                                               minutes=edt.minute,
                                               seconds=edt.second)
                        nsds.append(d + dt.timedelta(days=1))
                        scheduler.add_job(func=action_end,
                                          trigger=DateTrigger(d),
                                          id='action_end',
                                          timezone=tz,
                                          args=[kwargs])
                    print(fontcolor.F_GREEN + '-' * 80)
                    print('Note:enter Calf real task and mount these tasks:')
                    scheduler.print_jobs()
                    print('Datetime:' + str(crt))
                    print('-' * 80 + fontcolor.END)
                    scheduler.start()
                    # Work out how long to sleep before the next start
                    if len(nsds) == 0:
                        break
                    # ed = CalfDateTime.now(tz, offset)
                    nd = dt.datetime.now() if tz is None else dt.datetime.now(
                        tz=tz).replace(tzinfo=None)
                    delta = (min(nsds) - nd)
                    delta = delta.seconds + delta.days * 86400 - 1
                    print(fontcolor.F_YELLOW + '-' * 80)
                    print(
                        'Note:Calf will sleep {0} seconds and restart on {1}:'.
                        format(delta, min(nsds)))
                    print('Datetime:', str(crt))
                    print('-' * 80 + fontcolor.END)
                    delta = 1 if delta < 1 else delta
                    time.sleep(delta)
                    scheduler.shutdown(wait=False)
                    del scheduler
                except Exception as e:
                    ExceptionInfo(e)
            pass
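Stripped of the trading-calendar loop, the core of DScheduler above is three pieces: one-shot DateTrigger jobs for the start and end tasks, plus an IntervalTrigger bounded by start_date/end_date for the recurring task. A minimal sketch of that pattern with illustrative times and job functions (not the Calf API):

import datetime as dt

from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger


def on_start():
    print('start task')


def on_tick():
    print('recurring task')


def on_end():
    print('end task')


today = dt.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
scheduler = BackgroundScheduler(
    executors={'default': ThreadPoolExecutor(4), 'processpool': ProcessPoolExecutor(4)},
    job_defaults={'coalesce': True, 'max_instances': 1})

# One-shot start task at 09:30, recurring task every 3 s from 09:30 to 11:30,
# one-shot end task at 15:00 -- mirroring start_date / execute_date / end_date.
scheduler.add_job(on_start, DateTrigger(today + dt.timedelta(hours=9, minutes=30)))
scheduler.add_job(on_tick, IntervalTrigger(seconds=3,
                                           start_date=today + dt.timedelta(hours=9, minutes=30),
                                           end_date=today + dt.timedelta(hours=11, minutes=30)))
scheduler.add_job(on_end, DateTrigger(today + dt.timedelta(hours=15)))
scheduler.start()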
Example #5
    log_level = logging.INFO
    logging.basicConfig(
        level=log_level,
        # format='[%(asctime)s] - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
        format=
        '[%(asctime)s] - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=log_filename,
        filemode='a')
    logging.getLogger(__name__).setLevel(log_level)
    logging.info("现在的时间是:%s" % get_format_time())
    logging.info("xjobs定时调度程序已启动,正在初始化...")

    # scheduler configuration
    executors = {
        'default': ThreadPoolExecutor(max_workers=30),
        'processpool': ProcessPoolExecutor(max_workers=30)
    }
    job_defaults = {
        'coalesce': True,  # run accumulated (missed) runs only once
        'max_instances': 1,  # at most one concurrent instance
        'misfire_grace_time': 60  # 60-second grace period for missed runs
    }
    scheduler = BackgroundScheduler(executors=executors,
                                    job_defaults=job_defaults)

    # On every xjobs start-up, load all tasks once first
    load_task(1)
    # Add a recurring job: every 10 minutes, check for newly configured tasks and reload them automatically
    scheduler.add_job(func=load_task,
                      args=(0, ),
Example #6
class APS():
    executors = {
        'default': ThreadPoolExecutor(10),
        'processpool': ProcessPoolExecutor(1),
    }
    jobstores = {
        'default':
        SQLAlchemyJobStore(url='mysql+pymysql://root:123456@localhost/iam')
    }
    job_defaults = {
        'coalesce': True,
        'max_instances': 10,
    }
    scheduler = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    job_defaults=job_defaults)

    log.setup_logging()
    logger = log.get_logger()

    # @classmethod
    # def write_error_logs(cls,file,errors):
    #     logger = logging.getLogger(__name__)
    #     logger.setLevel(level=logging.INFO)
    #     file_hanlder = logging.FileHandler(filename=file,mode='a', encoding='utf-8')
    #     file_hanlder.setLevel(logging.INFO)
    #     formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
    #     file_hanlder.setFormatter(formatter)
    #     logger.addHandler(file_hanlder)
    #     logger.info(errors)
    #     logger.removeHandler(file_hanlder)

    @classmethod
    def ajob(cls):
        print("11111:%s" % datetime.datetime.now())
        cls.logger.info(datetime.datetime.now())

    @classmethod
    def get_api_case_coverage(cls):
        curl = r'http://192.168.120.173:8000/getcommentdetail'
        connection = db.MysqlTools().connection
        cursor = connection.cursor()
        sql = 'select api_path,project_id from api_api;'
        cursor.execute(sql)
        api_list = cursor.fetchall()
        for api in api_list:
            try:
                comment = re.search(r'^.*?([^/\s]+?)/?$', api['api_path'])
                data = {
                    'changename': r'application/controllers/api',
                    'object_id': api['project_id'],
                    'comment': comment.group(1)
                }
                commentdetail = requests.get(url=curl, params=data).json()
                if 'if_cn' in commentdetail.keys():
                    count = (commentdetail['if_cn'] + commentdetail['for_cn'] +
                             commentdetail['switch_cn'] +
                             commentdetail['while_cn']) * 2
                    update_sql = "update api_api set api_case_coverage = %d where api_path = '%s';" % (
                        count, api['api_path'])
                    cursor.execute(update_sql)
                    connection.commit()
                else:
                    update_sql = "update api_api set api_case_coverage = 1 where api_path = '%s' ;" % (
                        api['api_path'])
                    cursor.execute(update_sql)
                    connection.commit()
            except Exception as e:
                print(e)
                cls.logger.error(e)
                continue
        connection.close()

    @classmethod
    def add_acc_tasks(cls, id):
        cls.scheduler.add_job(cls.get_api_case_coverage,
                              'interval',
                              seconds=3600,
                              id=id)

    @classmethod
    def start_aps(cls):
        cls.scheduler.start()
        cls.logger.info("任务开始!!!")

    @classmethod
    def stop_aps(cls):
        cls.scheduler.shutdown(wait=False)
        cls.logger.info("任务结束!!!")
Example #7
from pytz import utc
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor
from .database import postgres_connection_string

job_stores = {'default': SQLAlchemyJobStore(url=postgres_connection_string)}
executors = {'default': ThreadPoolExecutor(20)}
job_defaults = {'coalesce': False, 'max_instances': 5}

app_scheduler = BlockingScheduler(executors=executors,
                                  job_defaults=job_defaults,
                                  timezone=utc)
Example #8
 def __init__(self):
     self.scheduler = BackgroundScheduler(timezone=self.timezone, executors={'default': ThreadPoolExecutor(30)})
Example #9
File: record.py  Project: radtek/autocheck
def record():
    log_file, log_level = log.get_log_args()
    logger = log.Logger(log_file, log_level)
    logger.logger.info("开始采集资源信息...")

    max_threads = 50
    executors = {"default": ThreadPoolExecutor(max_threads)}
    job_defaults = {
        "coalesce": True,
        "max_instances": 1,
        "misfire_grace_time": 3,
    }
    scheduler = BlockingScheduler(job_defaults=job_defaults,
                                  executors=executors)

    min_value = 10

    # Host resource recording
    logger.logger.info("Starting host resource collection...")
    disk_interval, cpu_interval, memory_interval, swap_interval, users_limit = conf.get(
        "host", "disk_interval", "cpu_interval", "memory_interval",
        "swap_interval", "users_limit")
    if int(disk_interval) < min_value:
        disk_interval = min_value
    if int(cpu_interval) < min_value:
        cpu_interval = min_value
    if int(memory_interval) < min_value:
        memory_interval = min_value
    if int(swap_interval) < min_value:
        swap_interval = min_value

    logger.logger.info("开始采集磁盘资源信息...")
    scheduler.add_job(host.disk_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(disk_interval),
                      id='disk_record')
    logger.logger.info("开始采集CPU资源信息...")
    scheduler.add_job(host.cpu_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(cpu_interval),
                      id='cpu_record')
    logger.logger.info("开始采集内存资源信息...")
    scheduler.add_job(host.memory_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(memory_interval),
                      id='memory_record')
    logger.logger.info("开始采集Swap资源信息...")
    scheduler.add_job(host.swap_record,
                      'interval',
                      args=[log_file, log_level],
                      seconds=int(swap_interval),
                      id='swap_record')
    logger.logger.info("开始采集启动时间资源信息...")
    #scheduler.add_job(host.boot_time_record, 'interval', args=[log_file, log_level], seconds=int(boot_time_interval), id='boot_time_record')
    host.boot_time_record(log_file, log_level)

    # User resource limits
    logger.logger.info("Starting user limit recording...")
    if users_limit is not None:
        users_limit_list = []
        for i in users_limit.split(","):
            users_limit_list.append(i.strip())

        for user in users_limit_list:
            scheduler.add_job(user_resource.record,
                              'interval',
                              args=[log_file, log_level, user],
                              next_run_time=datetime.datetime.now() +
                              datetime.timedelta(seconds=5),
                              minutes=60,
                              id=f'{user}_limit')

    # Tomcat resources
    tomcat_check, tomcat_interval, tomcat_port = conf.get(
        "tomcat",
        "check",
        "tomcat_interval",
        "tomcat_port",
    )
    if tomcat_check == '1':
        logger.logger.info("开始采集Tomcat资源信息...")
        tomcat_port_list = []  # 将tomcat_port参数改为列表
        for i in tomcat_port.split(","):
            tomcat_port_list.append(i.strip())
        if int(tomcat_interval) < min_value:
            tomcat_interval = min_value
        scheduler.add_job(tomcat.record,
                          'interval',
                          args=[log_file, log_level, tomcat_port_list],
                          seconds=int(tomcat_interval),
                          id='tomcat_record')

    # Redis resources
    redis_check, redis_interval, redis_password, redis_port, sentinel_port, sentinel_name, commands = conf.get(
        "redis", "check", "redis_interval", "password", "redis_port",
        "sentinel_port", "sentinel_name", "commands")
    if redis_check == "1":
        if int(redis_interval) < min_value:
            redis_interval = min_value
        logger.logger.info("开始采集Redis资源信息...")
        scheduler.add_job(redis.record, 'interval', args=[log_file, log_level, redis_password, redis_port, sentinel_port, sentinel_name, commands], \
                seconds=int(redis_interval), id='redis_record')

    # backup
    backup_check, backup_dir, backup_regular, backup_cron_time = conf.get(
        "backup", "check", "dir", "regular", "cron_time")
    if backup_check == "1":
        logger.logger.info("开始记录备份信息...")
        dir_list = []
        for i in backup_dir.split(","):
            dir_list.append(i.strip())

        regular_list = []
        for i in backup_regular.split(","):
            regular_list.append(i.strip())

        cron_time_list = []
        for i in backup_cron_time.split(","):
            cron_time_list.append(i.strip())

        for i in range(len(dir_list)):
            directory = dir_list[i]
            regular = regular_list[i]
            cron_time = cron_time_list[i].split(":")
            hour = cron_time[0].strip()
            minute = cron_time[1].strip()
            scheduler.add_job(backup.record,
                              'cron',
                              args=[log_file, log_level, directory, regular],
                              next_run_time=datetime.datetime.now(),
                              day_of_week='0-6',
                              hour=int(hour),
                              minute=int(minute),
                              id=f'backup{i}')

    # Record MySQL
    mysql_check, mysql_interval, mysql_user, mysql_ip, mysql_port, mysql_password = conf.get(
        "mysql", "check", "mysql_interval", "mysql_user", "mysql_ip",
        "mysql_port", "mysql_password")
    if mysql_check == "1":
        if int(mysql_interval) < min_value:
            mysql_interval = min_value
        logger.logger.info("开始采集MySQL资源信息...")
        scheduler.add_job(mysql.record,
                          'interval',
                          args=[
                              log_file, log_level, mysql_user, mysql_ip,
                              mysql_password, mysql_port
                          ],
                          seconds=int(mysql_interval),
                          id='mysql_record')

    # Record Oracle
    oracle_check, oracle_interval = conf.get("oracle", "check",
                                             "oracle_interval")
    if oracle_check == "1":
        if int(oracle_interval) < min_value:
            oracle_interval = min_value
        logger.logger.info("开始记录Oracle信息...")
        scheduler.add_job(oracle.record,
                          'interval',
                          args=[log_file, log_level],
                          seconds=int(oracle_interval),
                          id='oracle_record')

    scheduler.start()
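A detail worth noting in the example above: passing next_run_time=datetime.datetime.now() to add_job makes a cron or interval job fire once immediately, after which the trigger's normal schedule takes over. A minimal sketch of that trick (backup_job and the daily 02:30 schedule are hypothetical):

import datetime

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler(executors={'default': ThreadPoolExecutor(50)},
                              job_defaults={'coalesce': True, 'max_instances': 1})


def backup_job():
    print('running backup check')


# next_run_time=now makes the job fire once right away,
# then the cron trigger takes over (daily at 02:30 here).
scheduler.add_job(backup_job, 'cron',
                  next_run_time=datetime.datetime.now(),
                  day_of_week='0-6', hour=2, minute=30,
                  id='backup0')
scheduler.start()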
Example #10
-------------------------------------------------
"""
import utils.config as globalVar
import threading
from resource.rizhiyiSearch import Rizhiyi
from resource.updateVar import updateVar
from resource.api import runDaemon as flaskApi
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
from utils.config import Config

rzy = Rizhiyi()
updateVars = updateVar()
conf = Config()

executors = {'default': ThreadPoolExecutor(max_workers=2)}
scheduler = BackgroundScheduler(executors=executors)


def scheduleRunner(scheduleType):
    """
    定时任务器
    :return:
    """
    if scheduleType == "spl查询":
        scheduler.add_job(func=rzy.search,
                          trigger="interval",
                          seconds=conf.splSearchInterval)
    elif scheduleType == "凌晨更新变量":  # "update variables in the early morning"
        scheduler.add_job(func=updateVars.setDefaultValue,
                          trigger="cron",
Example #11
File: scheduler.py  Project: zoushiwen/spug
 def __init__(self):
     self.scheduler = BackgroundScheduler(timezone=self.timezone, executors={'default': ThreadPoolExecutor(30)})
     self.scheduler.add_listener(
         self._handle_event,
         EVENT_SCHEDULER_SHUTDOWN | EVENT_JOB_ERROR | EVENT_JOB_MAX_INSTANCES | EVENT_JOB_EXECUTED)
Example #12
    warn_file = lib.scheduler_config.get_config_str('log', 'warnlog')
    info_file = lib.scheduler_config.get_config_str('log', 'infolog')
    io_number = lib.scheduler_config.get_config_int('async', 'io_number')
    work_number = lib.scheduler_config.get_config_int('async', 'work_number')
    is_coroutine = lib.scheduler_config.get_config_boolean(
        'async', 'coroutine')

    # scheduler configuration parameters
    thread_num = lib.scheduler_config.get_config_int('scheduler', 'thread_num')
    process_num = lib.scheduler_config.get_config_int('scheduler',
                                                      'process_num')

    # initialize the single global task-dispatch object
    global scheduler
    executors = {
        'default': ThreadPoolExecutor(thread_num),
        'processpool': ProcessPoolExecutor(process_num)
    }
    scheduler = BackgroundScheduler(executors=executors)
    scheduler.start()

    # database configuration
    db_host = lib.scheduler_config.get_config_str('database', 'host')
    db_port = lib.scheduler_config.get_config_int('database', 'port')
    db_user = lib.scheduler_config.get_config_str('database', 'username')
    db_passwd = lib.scheduler_config.get_config_str('database', 'password')
    db_db = lib.scheduler_config.get_config_str('database', 'db')
    lib.scheduler_db.set_param(db_host, db_port, db_user, db_passwd, db_db)

    # URLs for sending email and SMS
    global mail_url
Example #13
    blast_results_folder = os.path.join(result_folder, "blast_results")
    if not os.path.exists(blast_results_folder):
        os.makedirs(blast_results_folder)
        logger.debug("Made new folder: blast_results")

    analysis_result_folder = os.path.join(result_folder, "analysis")
    if not os.path.exists(analysis_result_folder):
        os.makedirs(analysis_result_folder)
        logger.debug("Made new folder: analysis")

    blast_result = []
    logger.debug("Setting up scheduler")
    scheduler = BackgroundScheduler(
        job_defaults={'misfire_grace_time': 1500 * 2400})
    scheduler.add_executor(ThreadPoolExecutor(10))
    random.shuffle(sorted_files)
    try:
        for file in sorted_files[start:end]:
            scheduler.add_job(process_file,
                              args=[
                                  file, analysis_result_folder, blast_result,
                                  blast_results_folder
                              ])

        scheduler.start()

        while len(scheduler.get_jobs()):
            time.sleep(1)
            continue
Example #14
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
app = Flask(__name__)
DB = SQLAlchemy(app)
app.config.from_pyfile('../conf/redis.conf')
app.config.from_pyfile('../conf/sql.conf')
app.config.from_pyfile('../conf/task.conf')
logging.basicConfig()
logging.getLogger('apscheduler').setLevel(logging.DEBUG)
redis_host = app.config.get('REDIS_HOST')
redis_port = app.config.get('REDIS_PORT')
redis_password = app.config.get('REDIS_PASSWORD')
task_hosts = app.config.get('TASK_HOSTS')
RC = Redis = redis.StrictRedis(host=redis_host, port=redis_port,decode_responses=True)
HOST = socket.gethostbyname(socket.gethostname())
jobstores = {'default': SQLAlchemyJobStore(url=app.config.get('SQLALCHEMY_BINDS')['idc'])}
executors = {'default': ThreadPoolExecutor(50),'processpool': ProcessPoolExecutor(8)}
job_defaults = {'coalesce': False,'max_instances': 3,'misfire_grace_time':60}
scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=pytz.timezone('Asia/Shanghai'))
# single-node background execution
def scheduler_tasks():
    date_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
    run_date = date_time.strftime('%H:%M').split(':')
    scheduler.remove_all_jobs()
    ################################################################################################################################################
    #scheduler.add_job(Task.zabbix_counts_task, 'cron', second='0', minute=run_date[1], hour=run_date[0],id=Task.zabbix_counts_task.__name__, replace_existing=True)
    scheduler.add_job(Task.business_monitor_task, 'cron', second='0', minute='*', id=Task.business_monitor_task.__name__,replace_existing=True)
    scheduler.add_job(Task.es_log_status, 'cron', second='0', minute='*', id=Task.es_log_status.__name__,replace_existing=True)
    scheduler.add_job(Task.es_log_time, 'cron', second='0', minute='*', id=Task.es_log_time.__name__,replace_existing=True)
    scheduler.add_job(Task.business_data, 'cron', second='0', minute='*', id=Task.business_data.__name__, replace_existing=True)
    scheduler.add_job(Task.assets_infos, 'cron', second='0', minute='30',hour='4',id=Task.assets_infos.__name__,replace_existing=True)
    scheduler.add_job(Task.auto_discovery_task, 'cron', second='0', minute='0', hour='*/4',id=Task.auto_discovery_task.__name__,replace_existing=True)
Example #15
        # from . import database as db
        settings = load_settings()
        db_sess = None
        fetcher = NUSeriesUpdateFilter.NUSeriesUpdateFilter(db_sess, settings)
        print(fetcher.handlePage("https://www.novelupdates.com"))
    except:
        import traceback

        print("ERROR: Failure when running job!")
        traceback.print_exc()
    # finally:
    # 	db_sess.close()


executors = {
    'main_jobstore': ThreadPoolExecutor(5),
}
job_defaults = {
    'coalesce': True,
    'max_instances': 1,
}

jobstores = {'main_jobstore': MemoryJobStore()}


def start_scheduler():

    sched = BackgroundScheduler(jobstores=jobstores,
                                executors=executors,
                                job_defaults=job_defaults)
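Because neither the jobstore nor the executor in the example above is registered under the name 'default', jobs added to this scheduler have to name both explicitly, or APScheduler will fail to look them up. A self-contained sketch of that detail, mirroring the dicts above (check_feeds and the 15-minute interval are hypothetical):

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler

executors = {'main_jobstore': ThreadPoolExecutor(5)}
jobstores = {'main_jobstore': MemoryJobStore()}
job_defaults = {'coalesce': True, 'max_instances': 1}


def check_feeds():
    print('checking feeds')


sched = BackgroundScheduler(jobstores=jobstores,
                            executors=executors,
                            job_defaults=job_defaults)
# With no store/executor named 'default', every job must name both explicitly.
sched.add_job(check_feeds, 'interval', minutes=15,
              jobstore='main_jobstore', executor='main_jobstore',
              id='check_feeds', replace_existing=True)
sched.start()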
Example #16
# log = logging.getLogger('apscheduler.executors.default')
# log.setLevel(logging.INFO)  # DEBUG
#
# fmt = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
# h = logging.StreamHandler()
# h.setFormatter(fmt)
# log.addHandler(h)

def daily_start():
    """
    启动日常爬虫
    :param tasks_daily: 所有日常爬虫的list
    :return:
    """
    task_seconds, tasks_daily = get_tasks()
    for task in tasks_daily:
        start(task)



if __name__ == '__main__':


    scheduler_daily = BackgroundScheduler(executors={'default': ThreadPoolExecutor(1)})
    scheduler_daily.add_job(daily_start, 'cron', hour=HOUR, minute=MIN, second=SEC)

    scheduler_daily.start()
    while True:
        time.sleep(3600)
Example #17
 def run(self, output_fn, **kwargs):
     '''Process the task queue stored in the database'''
     # Load the MySQL configuration
     from Functions import AppServer
     db_name = AppServer().getConfValue('Databases', 'MysqlDB')
     db_user = AppServer().getConfValue('Databases', 'MysqlUser')
     db_pass = AppServer().getConfValue('Databases', 'MysqlPass')
     db_ip = AppServer().getConfValue('Databases', 'MysqlHost')
     db_port = AppServer().getConfValue('Databases', 'MysqlPort')
     dbconn = 'mysql://%s:%s@%s:%s/%s' % (db_user, db_pass, db_ip,
                                          int(db_port), db_name)
     from MySQL import writeDb
     # Try to clear the job records stored in the database
     try:
         sql = """delete from apscheduler_jobs ;"""
         writeDb(sql, )
     except:
         True
     # Dynamically import the task functions
     moduleSrc = 'TaskFunctions'
     dylib = importlib.import_module(moduleSrc)
     # Reload the job queue (pick one of the two scheduler types as appropriate)
     from apscheduler.schedulers.background import BackgroundScheduler
     from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
     job_defaults = {'max_instances': 1}
     executors = {
         'default': ThreadPoolExecutor(20),
         'processpool': ProcessPoolExecutor(5)
     }
     scheduler = BackgroundScheduler(timezone='Asia/Shanghai',
                                     executors=executors,
                                     job_defaults=job_defaults)
     # sqlite or mysql
     scheduler.add_jobstore('sqlalchemy', url='%s' % dbconn)
     from MySQL import readDb
     sql = """ Select id,timedesc from taskconf where status='1' """
     result = readDb(sql, )
     for taskobject in result:
         Taskid = 'TaskID_%s' % taskobject.get('id')
         FunName = 'TaskFunc_%s' % taskobject.get('id')
         function = getattr(dylib, FunName)
         cronlist = taskobject.get('timedesc').strip().split(' ')
         print cronlist
         if len(cronlist) == 5:
             scheduler.add_job(func=function,
                               trigger='cron',
                               month=cronlist[4],
                               day=cronlist[3],
                               hour=cronlist[2],
                               minute=cronlist[1],
                               second=cronlist[0],
                               id=Taskid)
         elif len(cronlist) == 6:
             scheduler.add_job(func=function,
                               trigger='cron',
                               day_of_week=cronlist[5],
                               month=cronlist[4],
                               day=cronlist[3],
                               hour=cronlist[2],
                               minute=cronlist[1],
                               second=cronlist[0],
                               id=Taskid)
         else:
             continue
     scheduler.start()
     fd = open(output_fn, 'a')
     try:
         dtnow = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         line = '\n[%s]: System Starting all Tasks...\n' % dtnow
         fd.write(line)
         fd.flush()
         while 1:
             pass
     except KeyboardInterrupt:  # catch Ctrl+C; has no effect in this script, but works from a console
         dtnow = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         line = '\n[%s]: System Stopping all Tasks, please wait...\n' % dtnow
         fd.write(line)
         fd.close()
         scheduler.shutdown()
         time.sleep(1)
         os._exit(0)
Example #18
class BaseConfig:
    """
    Base configuration
    """
    db_info = {
        "engine": "postgresql",
        "driver": "psycopg2",
        "user": "******",
        "password": "******",
        "host": "192.168.158.14",
        "port": 5432,
        "name": "SnailData"
    }
    DEBUG = False
    TESTING = False
    # ================[ API version settings ]================
    API_INTERFACE = "/api"

    # ================[ Upload settings ]================
    BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
    # Name of the folder that holds uploaded files
    DIR_NAME = "scripts"
    UPLOAD_DIR = join(BASE_DIR, DIR_NAME)

    # ================[ APScheduler settings ]================
    # Timezone
    SCHEDULER_TIMEZONE = 'Asia/Shanghai'

    # Whether to enable flask-apscheduler's built-in API
    SCHEDULER_API_ENABLED = False

    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=get_db_uri(db_info))
    }
    SCHEDULER_EXECUTORS = {
        'default': ThreadPoolExecutor(100),
        'processpool': ProcessPoolExecutor(max_workers=50),
    }
    """
    coalesce:           是否允许job实例合并
    比如上个任务实例运行时间超过了运行间隔,到了再次运行新实例的时间,
    这个时候,是否将两个实例合并为一个实例运行
        True:合并,一个任务只会运行一个实例
        False:不合并,一个任务会同时运行多个实例

    [2020-07-27]
    replace_existing:
    如果在程序(这里指scheduler)初始化期间,在持久化作业存储中安排作业,
    必须给作业指定一个显示的ID,并且设置replace_existing=True,
    否则,每次程序重启的时候,会得到该job的一个新副本

    max_instances:      单个job最多可以运行几个实例
    misfire_grace_time: 任务因不可抗力在规定时间点没有执行时,允许自动补偿执行的宽限期
                            假设该值为1800s(0.5h),任务将在9:00 运行,结果 8:55 ~ 9:10调度程序挂了,
                            那么只要在9:00 ~ 9:30内调度程序恢复正常(按1800s值计算),该任务会马上执行。
                            简称:虽迟但执
    """
    SCHEDULER_JOB_DEFAULTS = {
        "coalesce": True,
        "replace_existing": True,
        "max_instances": 1,
        "misfire_grace_time": 1800
    }

    #
    # Administrator users
    ADMINS = ("snail", )

    # ===================[ Session settings ]===================
    #
    SECRET_KEY = "4)rzG[giX:{>)2>_Np'`X-Q&YZFzj@5-"
    SESSION_USE_SIGNER = True  # sign the session cookie sent to the browser
    SESSION_TYPE = "sqlalchemy"
    SESSION_SQLALCHEMY = db

    # Database used by SQLAlchemy
    SQLALCHEMY_DATABASE_URI = get_db_uri(db_info)  # "sqlite:///sqlite.db"
    SQLALCHEMY_TRACK_MODIFICATIONS = True

    # ===================[ Cache settings ]===================
    # Cache type
    CACHE_TYPE = "filesystem"
    # Required when the cache type is "filesystem".
    # Note: this is a directory, not a file.
    CACHE_DIR = "cache"
    # Cache data timeout (seconds)
    CACHE_DEFAULT_TIMEOUT = 60 * 60 * 24 * 30
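Those job defaults carry over directly to plain APScheduler as well. A minimal sketch of how they behave, assuming a job that sometimes overruns its interval (slow_job and the 60-second interval are illustrative):

import time

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler


def slow_job():
    time.sleep(90)  # deliberately longer than the 60-second interval


scheduler = BackgroundScheduler(
    executors={'default': ThreadPoolExecutor(10)},
    job_defaults={
        'coalesce': True,            # collapse a backlog of missed runs into a single run
        'max_instances': 1,          # never run two copies of the same job at once
        'misfire_grace_time': 1800,  # still run a missed job if it is no more than 30 min late
    })
scheduler.add_job(slow_job, 'interval', seconds=60, id='slow_job',
                  replace_existing=True)  # explicit id + replace_existing matters with persistent stores
scheduler.start()

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    scheduler.shutdown()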
Example #19
from apscheduler.schedulers.background import BackgroundScheduler
from flask import request
from flask import Flask, jsonify
from flask_restful import Api, Resource
import datetime
import threading
from apscheduler.executors.pool import ThreadPoolExecutor
executors = {"default": ThreadPoolExecutor(3)}


class Test(Resource):
    def __init__(self):
        self.scheduler = BackgroundScheduler(executors=executors)

    def send_sms(self, name, task_id):
        print("你是 ", name, task_id, threading.current_thread())

    def make_scheduler(self, name, task_id):

        self.scheduler.add_job(func=self.send_sms,
                               args=(
                                   name,
                                   task_id,
                               ),
                               next_run_time=datetime.datetime.now() +
                               datetime.timedelta(minutes=1),
                               id=task_id)

    def start_sched(self, name, task_id):
        self.make_scheduler(name, task_id)
        self.scheduler.start()
Example #20
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import datetime
import os
from .alarm_regex_common import *
from config import logger, LOG_PRIORITY
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler
from common import get_interval_data

executors = {
    "default": ThreadPoolExecutor(20),
    "processpool": ProcessPoolExecutor(5)
}

job_defaults = {"coalesce": False, "max_instances": 3}
scheduler = BackgroundScheduler()
scheduler.configure(executors=executors, job_defaults=job_defaults)
# Interval time (in minutes) before sending clear alarm
interval = get_interval_data()

LOADCONFIGURATIONFAILED = "LoadConfigurationFailed"
LOADZONEFAILED = "LoadZoneFailed"
TSIGBADTIME = "TsigBadTime"
NETWORKINTERFACEDOWN = "NetworkInterfaceDown"
STORAGEREADONLY = "StorageReadOnly"
ZONETRANSFERFAILED = "ZoneTransferFailed"
Example #21
def create_app(config, enable_config_file=False):
    """
    创建应用
    :param config: 配置信息对象
    :param enable_config_file: 是否允许运行环境中的配置文件覆盖已加载的配置信息
    :return: 应用
    """
    app = create_flask_app(config, enable_config_file)

    # Create the Snowflake ID worker
    from utils.snowflake.id_worker import IdWorker
    app.id_worker = IdWorker(app.config['DATACENTER_ID'],
                             app.config['WORKER_ID'], app.config['SEQUENCE'])

    # Rate limiter
    from utils.limiter import limiter as lmt
    lmt.init_app(app)

    # Configure logging
    from utils.logging import create_logger
    create_logger(app)

    # Register URL converters
    from utils.converters import register_converters
    register_converters(app)

    from redis.sentinel import Sentinel
    _sentinel = Sentinel(app.config['REDIS_SENTINELS'])
    app.redis_master = _sentinel.master_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])
    app.redis_slave = _sentinel.slave_for(
        app.config['REDIS_SENTINEL_SERVICE_NAME'])

    from rediscluster import StrictRedisCluster
    app.redis_cluster = StrictRedisCluster(
        startup_nodes=app.config['REDIS_CLUSTER'])

    # rpc
    app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)

    # Elasticsearch
    app.es = Elasticsearch(
        app.config['ES'],
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60)

    # socket.io
    # app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)

    # Initialize the MySQL database connection
    from models import db

    db.init_app(app)

    # Register request hooks
    from utils.middlewares import jwt_authentication
    app.before_request(jwt_authentication)

    # Register the user module blueprint
    from .resources.user import user_bp
    app.register_blueprint(user_bp)

    # Register the news module blueprint
    from .resources.news import news_bp
    app.register_blueprint(news_bp)

    # Register the notice module blueprint
    from .resources.notice import notice_bp
    app.register_blueprint(notice_bp)

    # Search
    from .resources.search import search_bp
    app.register_blueprint(search_bp)

    # Define the APScheduler scheduler object.
    # Store it on the Flask app object so views can use it to add new scheduled jobs.
    executors = {
        'default': ThreadPoolExecutor(20),
    }
    app.scheduler = BackgroundScheduler(executors=executors)

    # Jobs managed by the scheduler come in two kinds:
    # 1. Jobs known up front, e.g. fixing the Redis statistics;
    #    define those here with add_job
    # app.scheduler.add_job()

    # Add the recurring job that fixes the statistics data
    from .schedulers.statistic import fix_statistics
    # Runs every day at 03:00
    # args lets the scheduler pass parameters to the job function when it runs
    # app.scheduler.add_job(fix_statistics, 'cron', hour=3, args=[app])

    # For easier testing, run it immediately
    app.scheduler.add_job(fix_statistics, 'date', args=[app])

    # 2. Jobs created dynamically by view functions while Flask is running;
    #    add those from a view with current_app.scheduler.add_job

    app.scheduler.start()  # non-blocking: does not block the Flask app; a separate background thread/process handles the timing

    return app
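Because the scheduler is stored on the Flask app object, view functions can add jobs dynamically through current_app, as the comments above describe. A hedged sketch of such a view (the blueprint, route, and send_reminder function are hypothetical, not part of the original project):

from datetime import datetime, timedelta

from flask import Blueprint, current_app

demo_bp = Blueprint('demo', __name__)


def send_reminder(user_id):
    print('reminding user', user_id)


@demo_bp.route('/remind/<int:user_id>')
def schedule_reminder(user_id):
    # Dynamically added one-shot job, fired one minute from now.
    current_app.scheduler.add_job(send_reminder, 'date',
                                  run_date=datetime.now() + timedelta(minutes=1),
                                  args=[user_id], id='reminder-%d' % user_id,
                                  replace_existing=True)
    return 'scheduled'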
Example #22
        dump.crawl_by_search(p, skip_exists=False)


bins_az = [
    "",
    "2017-09-01",
] + all_months_since((2017, 11)) + [""]
bins_fgo = ["", "2015-12-01", "2016-02-01", "2016-04-01"] + all_months_since(
    (2016, 5)) + [""]
bins_all = [
    "", "2015-01-01", "2017-01-01", "2018-01-01", "2019-01-01", "2020-01-01",
    ""
]

if __name__ == "__main__":
    scheduler = Scheduler(executers={"default": ThreadPoolExecutor(8)})

    scheduler.add_job(check_all_month,
                      kwargs={
                          'params': {
                              'word': '10000users入り',
                              's_mode': 's_tag',
                              'mode': 'r18'
                          },
                          'bins': bins_all
                      },
                      trigger='cron',
                      second='0',
                      minute='0',
                      hour='2',
                      day_of_week='sun',
Example #23
    def add(self):
        for d in range(100, 110):
            self.t.__class__.cache.add(d)
            time.sleep(randint(1, 3))
            print d, len(self.t.__class__.cache)


if __name__ == '__main__':
    from apscheduler.jobstores.memory import MemoryJobStore
    from apscheduler.schedulers.blocking import BlockingScheduler
    from apscheduler.executors.pool import ThreadPoolExecutor

    jobstores = {'default': MemoryJobStore()}

    # use ThreadPoolExecutor as the default executor; ProcessPoolExecutor does not work here
    executors = {
        'default': ThreadPoolExecutor(4),
    }

    job_defaults = {'coalesce': False, 'max_instances': 1}
    app = BlockingScheduler(jobstores=jobstores,
                            executors=executors,
                            job_defaults=job_defaults)

    def task():
        print len(CacheTest.cache)

    app.add_job(task, 'interval', seconds=2)
    app.start()
Example #24
_data_dir = join(_base_dir, 'data')

if not os.path.exists(_tmp_dir):
    os.mkdir(_tmp_dir)

if not os.path.exists(_log_dir):
    os.mkdir(_log_dir)

if not os.path.exists(_data_dir):
    os.mkdir(_data_dir)

# a LIFO stack for storing failed uploads to be accessible by uploader job.
upload_stack = LifoQueue(maxsize=5)

# executors for job schedulers (max threads: 30)
sched = BlockingScheduler(executors={'default': ThreadPoolExecutor(30)})

# Application logger
logger = Logger(name='SkyScanner')

if Config.log_to_console:
    logger.add_stream_handler()

if Config.log_path:
    log_file_path = join(Config.log_path, logger.name)
    logger.add_timed_rotating_file_handler(log_file=log_file_path)

if Config.dashboard_enabled:
    logger.add_influx_handler(username=Config.influxdb_user,
                              pwd=Config.influxdb_pwd,
                              host=Config.influxdb_host,
Example #25
    finishedMutex.acquire()
    finishedThreads = finishedThreads + 1
    print "[Worker {0}]\tFinished items(count me): {1}".format(worker, finishedThreads)
    if finishedThreads==numWorkers:
        print "[Worker {0}]\tAll finished. Send event".format(worker)
        event.set()
    finishedMutex.release()
    print "[Worker {0}]\tEXIT".format(worker)

finishedThreads = 0
jobs = range(1, 51)
worker = 0

mutex = threading.Lock()
finishedMutex = threading.Lock()
event = threading.Event()

scheduler = BackgroundScheduler()
scheduler.add_executor(ThreadPoolExecutor(max_workers=20))
scheduler.start()


for job in jobs:
    worker += 1
    scheduler.add_job(threadFunc, args=[worker, len(jobs)], max_instances=1000, misfire_grace_time=86400)

print "[Main]\t\tWait for event"
event.wait()
print "[Main]\t\tExit"
Example #26
 def _create_default_executor(self):
     """Creates a default executor store, specific to the particular scheduler type."""
     return ThreadPoolExecutor()
Example #27
from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
jenkins_url = 'http://localhost:8080'  # Jenkins address
jenkins_user = '******'  # Jenkins username
jenkins_password = '******'  # Jenkins password
xitong_request_toke = 'Fetext_token_system'  # token added to internal inter-service API requests to tell them apart
Try_Num_Case = 5  # number of retries
Interface_Time_Out = 5000  # timeout
redis_password = '******'
max_connec_redis = 10
test_fail_try_num = 3
jobstores = {
    'redis': RedisJobStore(),
}
executors = {
    'default': ThreadPoolExecutor(10),
    'processpool': ProcessPoolExecutor(3)
}
PageShow = 25  # how many records to show per page
Dingtalk_access_token = ''  # webhook of the custom DingTalk group bot that receives notifications
OneAdminCount = 10  # number of project administrators
Config_daoru_xianzhi = 50  # limit on how many configs can be imported
save_duration = 24 * 60 * 60  # how long results are kept in Redis
redis_host = 'localhost'
redis_port = 6379
redis_save_result_db = 2


class dev(object):  # development environment configuration
    SECRET_KEY = 'BaSeQuie'
    basedir = os.path.abspath(os.path.dirname(__file__))
Example #28
    return str(tz)


ansible_scheduler = BackgroundScheduler({
    'apscheduler.job_defaults.coalesce':
    'true',
    'apscheduler.job_defaults.max_instances':
    '5',
    'apscheduler.timezone':
    get_timezone(),
})

logger.error("create backgroupd schedule with id=%s" % id(ansible_scheduler))
ansible_scheduler.add_jobstore(DjangoJobStore(), 'default')
# ansible_scheduler.add_executor(ProcessPoolExecutor(10), 'default')
ansible_scheduler.add_executor(ThreadPoolExecutor(20), 'default')
ansible_scheduler.start()


def run_cron_job(op, group, entity):
    url = ansible_get_git_url()
    prj_name = url.split('/')[-1].split('.')[0]
    message = {}
    message['event'] = "APSchedule"
    message['type'] = "routine"
    message["role"] = op
    message['group'] = group
    message["host"] = entity
    message["src_path"] = ANSIBLE_PATH + prj_name
    message['username'], message['password'] = get_host_username_and_password(
        entity)