def __init__(self,
                 useEmulator: bool = False,
                 enableSenseHAT: bool = False,
                 pollRate: int = 5,
                 allowConfigOverride: bool = True):
        """
        Init the SensorAdapterManager, if using simulator, setup data sets and sim tasks for simulation

        :param useEmulator: Whether use Emulator
        :param pollRate: Interval seconds for polling sensor data
        :param allowConfigOverride: If allow to override config
        """
        logging.info("SensorAdapterManager is initializing...")
        # Init basic config variables
        self.useEmulator = useEmulator
        self.pollRate = pollRate
        self.allowConfigOverride = allowConfigOverride
        # Init data message listener
        self.dataMsgListener: IDataMessageListener = None
        # Init scheduler; start() is expected to be invoked later (e.g. from startManager())
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(self.handleTelemetry,
                               'interval',
                               seconds=self.pollRate)

        configUtil = ConfigUtil()
        self.enableSenseHAT = enableSenseHAT

        if self.enableSenseHAT:
            logging.info("SensorAdapterManager is using SenseHAT.")
            self.humiditySensorI2cTask = HumidityI2cSensorAdapterTask()
            self.pressureSensorI2cTask = PressureI2cSensorAdapterTask()
            self.temperatureSensorI2cTask = TemperatureI2cSensorAdapterTask()
        elif self.useEmulator:
            logging.info("SensorAdapterManager is using emulator.")

            humidityModule = __import__(
                'programmingtheiot.cda.emulated.HumiditySensorEmulatorTask',
                fromlist=['HumiditySensorEmulatorTask'])
            huClass = getattr(humidityModule, 'HumiditySensorEmulatorTask')
            self.humidityEmulator = huClass()

            pressureModule = __import__(
                'programmingtheiot.cda.emulated.PressureSensorEmulatorTask',
                fromlist=['PressureSensorEmulatorTask'])
            prClass = getattr(pressureModule, 'PressureSensorEmulatorTask')
            self.pressureEmulator = prClass()

            tempModule = __import__(
                'programmingtheiot.cda.emulated.TemperatureSensorEmulatorTask',
                fromlist=['TemperatureSensorEmulatorTask'])
            teClass = getattr(tempModule, 'TemperatureSensorEmulatorTask')
            self.tempEmulator = teClass()
        else:
            logging.info("SensorAdapterManager is using simulators.")
            self.dataGenerator = SensorDataGenerator()

            humidityFloor = configUtil.getFloat(
                ConfigConst.CONSTRAINED_DEVICE,
                ConfigConst.HUMIDITY_SIM_FLOOR_KEY,
                SensorDataGenerator.LOW_NORMAL_ENV_HUMIDITY)
            humidityCeiling = configUtil.getFloat(
                ConfigConst.CONSTRAINED_DEVICE,
                ConfigConst.HUMIDITY_SIM_CEILING_KEY,
                SensorDataGenerator.HI_NORMAL_ENV_HUMIDITY)
            pressureFloor = configUtil.getFloat(
                ConfigConst.CONSTRAINED_DEVICE,
                ConfigConst.PRESSURE_SIM_FLOOR_KEY,
                SensorDataGenerator.LOW_NORMAL_ENV_PRESSURE)
            pressureCeiling = configUtil.getFloat(
                ConfigConst.CONSTRAINED_DEVICE,
                ConfigConst.PRESSURE_SIM_CEILING_KEY,
                SensorDataGenerator.HI_NORMAL_ENV_PRESSURE)
            tempFloor = configUtil.getFloat(
                ConfigConst.CONSTRAINED_DEVICE, ConfigConst.TEMP_SIM_FLOOR_KEY,
                SensorDataGenerator.LOW_NORMAL_INDOOR_TEMP)
            tempCeiling = configUtil.getFloat(
                ConfigConst.CONSTRAINED_DEVICE,
                ConfigConst.TEMP_SIM_CEILING_KEY,
                SensorDataGenerator.HI_NORMAL_INDOOR_TEMP)
            humidityData = self.dataGenerator.generateDailyEnvironmentHumidityDataSet(
                minValue=humidityFloor,
                maxValue=humidityCeiling,
                useSeconds=False)
            pressureData = self.dataGenerator.generateDailyEnvironmentPressureDataSet(
                minValue=pressureFloor,
                maxValue=pressureCeiling,
                useSeconds=False)
            tempData = self.dataGenerator.generateDailyIndoorTemperatureDataSet(
                minValue=tempFloor, maxValue=tempCeiling, useSeconds=False)
            self.humiditySensorSimTask = HumiditySensorSimTask(
                dataSet=humidityData,
                minVal=humidityFloor,
                maxVal=humidityCeiling)
            self.pressureSensorSimTask = PressureSensorSimTask(
                dataSet=pressureData,
                minVal=pressureFloor,
                maxVal=pressureCeiling)
            self.temperatureSensorSimTask = TemperatureSensorSimTask(
                dataSet=tempData, minVal=tempFloor, maxVal=tempCeiling)
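The __import__(..., fromlist=[...]) calls in the emulator branch above can be written more idiomatically with importlib; a minimal equivalent sketch for the humidity emulator (same module path as above):

import importlib

# importlib.import_module returns the leaf module directly, so no fromlist is needed
module = importlib.import_module(
    'programmingtheiot.cda.emulated.HumiditySensorEmulatorTask')
HumiditySensorEmulatorTask = getattr(module, 'HumiditySensorEmulatorTask')
humidityEmulator = HumiditySensorEmulatorTask()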
Example #2
from flask import Flask, request, jsonify
import os
from flask_cors import CORS
import csv
from apscheduler.schedulers.background import BackgroundScheduler

# configuration
DEBUG = True

# instantiate the app
app = Flask(__name__)
app.config.from_object(__name__)

# cron scheduler to fetch records
sched = BackgroundScheduler(daemon=True)

def fetch():
    os.system('python run.py')

# fires every minute, with each run shifted randomly by up to 120 s (jitter)
sched.add_job(fetch, 'cron', minute='*', jitter=120)
sched.start()

# enable CORS
CORS(app, resources={r'/*': {'origins': '*'}})


@app.route('/test', methods=['GET'])
def ping_pong():
    return jsonify('Flask Background Scheduler Cron Job')

if __name__ == '__main__':
    # use_reloader=False keeps the werkzeug reloader from importing this module
    # a second time and starting a duplicate scheduler when DEBUG is enabled
    # (assumed completion; the original listing is truncated here)
    app.run(debug=DEBUG, use_reloader=False)
Example #3 (File: sbb.py, Project: araikc/sbb)
########## Scheduler ##########

from pytz import utc
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor

jobstores = {
    # 'mongo': MongoDBJobStore(),
    'default': SQLAlchemyJobStore(url=application.config['SQLALCHEMY_DATABASE_URI'])
}
executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
    'coalesce': False,
    'max_instances': 3
}
scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

@scheduler.scheduled_job('cron', id="job_id", day='*', hour=0, minute=0, second=0, start_date='2018-02-24 00:00:00')
def reward_investments():
	#print('Tick! The time is: %s' % datetime.now())
	from models import AccountInvestments
	from sbb import db

	accInvs = AccountInvestments.query.filter_by(isActive=1).all()
	for ai in accInvs:
		perc = ai.investmentPlan.percentage
		ai.currentBalance += ai.initialInvestment * perc / 100
		db.session.add(ai)
	db.session.commit()

scheduler.start()
Example #4
def load_on_scheduler(func, interval_sec):
    print('Loading on scheduler...')
    scheduler = BackgroundScheduler()
    scheduler.add_job(func, 'interval', seconds=interval_sec)
    scheduler.start()
Example #5
        logger.info('delete a rent application: ' + str(instance))
        instance.delete()

    filter_fields = '__all__'
    search_fields = ['description', 'comments', 'user_comments']
    ordering_fields = '__all__'


from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.memory import MemoryJobStore
from django_apscheduler.jobstores import register_job

try:
    # Instantiate the scheduler

    scheduler = BackgroundScheduler(jobstores={'default': MemoryJobStore()})

    # To have the scheduler use DjangoJobStore() instead:
    # scheduler.add_jobstore(DjangoJobStore(), "default")
    # ('scheduler', "interval", seconds=1)  # with the interval trigger this would run once every second
    @register_job(scheduler, 'interval', minutes=30, id='expire_reminder')
    def expire_reminder():
        utc_tz = pytz.timezone('UTC')
        rent_applications = RentApplication.objects.filter(applying=True)
        for rent_application in rent_applications:
            lease_term_end = rent_application.lease_term_end
            current_time = datetime.datetime.now(tz=utc_tz)
            seconds_delta = (lease_term_end - current_time).total_seconds()

            if seconds_delta > 60 * 60 * 24:
                continue
Example #6
def cli(docker_sockets, docker_tls, docker_tls_verify, interval, cron, log_level, run_once, notifiers,
        skip_start_notif, label, cleanup, repo_user, repo_pass, stop_signal, disable_services_check,
        disable_containers_check, template_file, hostname, latest, wait, recreate_first):
    """Declare command line options"""

    # Create App logger
    log = DocupdaterLogger(level=log_level)
    log.logger.info('Version: %s-%s', VERSION, BRANCH)

    config = Config(docker_sockets=docker_sockets,
                    docker_tls=docker_tls,
                    docker_tls_verify=docker_tls_verify,
                    interval=interval,
                    cron=cron,
                    log_level=log_level,
                    run_once=run_once,
                    notifiers=notifiers,
                    skip_start_notif=skip_start_notif,
                    label=label,
                    cleanup=cleanup,
                    repo_user=repo_user,
                    repo_pass=repo_pass,
                    stop_signal=stop_signal,
                    disable_services_check=disable_services_check,
                    disable_containers_check=disable_containers_check,
                    template_file=template_file,
                    hostname=hostname,
                    latest=latest,
                    wait=wait,
                    recreate_first=recreate_first)
    config.config_blacklist()  # Configure mask on logger

    log.logger.debug("pyupdater configuration: %s", config.options)

    notification_manager = NotificationManager(config)
    scheduler = BackgroundScheduler()
    scheduler.start()

    for socket in config.docker_sockets:
        try:
            docker = Docker(socket, config, notification_manager)
            scanner = Scanner(docker)

            # Always begin to check the self update
            scanner.self_update()
            # Check docker swarm mode is running on a manager
            scanner.check_swarm_mode()

            if config.cron:
                scheduler.add_job(
                    scanner.update,
                    name=f'Cron container update for {socket}',
                    trigger='cron',
                    minute=config.cron[0],
                    hour=config.cron[1],
                    day=config.cron[2],
                    month=config.cron[3],
                    day_of_week=config.cron[4],
                    misfire_grace_time=20
                )
            else:
                if config.run_once:
                    scheduler.add_job(scanner.update, name=f'Run Once container update for {socket}')
                else:
                    scheduler.add_job(
                        scanner.update,
                        name=f'Initial run interval container update for {socket}'
                    )
                    scheduler.add_job(
                        scanner.update,
                        name=f'Interval container update for {socket}',
                        trigger='interval', seconds=config.interval,
                        misfire_grace_time=20
                    )
        except ConnectionError:
            log.logger.error("Could not connect to socket %s. Check your config", config.socket)

    if config.run_once:
        next_run = None
    elif config.cron:
        next_run = scheduler.get_jobs()[0].next_run_time
    else:
        now = datetime.now(timezone.utc).astimezone()
        next_run = (now + timedelta(0, config.interval)).strftime("%Y-%m-%d %H:%M:%S")

    if not config.skip_start_notif:
        notification_manager.send(StartupMessage(config.hostname, next_run=next_run), config.notifiers)

    while scheduler.get_jobs():
        sleep(10)

    scheduler.shutdown()
Example #7
from threading import Timer
import time

def print_time(msg):
    print(msg, time.time())

print(time.time())
Timer(5,  print_time, ("arg 1",)).start()   # the trailing comma in the args tuple is required
Timer(10, print_time, ("arg 2",)).start()
print(time.time())  # does not block waiting for the two timers above
# print_time thus runs once at 5 seconds and once at 10 seconds after the program starts.

################################################################################
If your jobs are CPU-bound, consider using ProcessPoolExecutor instead of ThreadPoolExecutor.
APScheduler ships with three built-in trigger types:
• date: use when you want to run a job exactly once, at a certain point in time
• interval: use when you want to run a job at fixed intervals of time
• cron: use when you want to run a job periodically at certain time(s) of day
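A minimal sketch of all three trigger types side by side (my_job and the specific times are illustrative, not from the original text):

from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def my_job():
    print('tick')

scheduler = BackgroundScheduler()
# date: run my_job exactly once, at the given moment
scheduler.add_job(my_job, 'date', run_date=datetime(2025, 1, 1, 12, 0))
# interval: run my_job every 30 seconds
scheduler.add_job(my_job, 'interval', seconds=30)
# cron: run my_job every day at 18:00
scheduler.add_job(my_job, 'cron', hour=18, minute=0)
scheduler.start()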

from apscheduler.schedulers.background import BackgroundScheduler
scheduler = BackgroundScheduler()
# Initialize the rest of the application here, or before the scheduler initialization
# By default this uses MemoryJobStore and ThreadPoolExecutor (max 10 threads by default)

# The three examples below are completely equivalent
Example 1:
from pytz import utc
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore
# from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
jobstores = {
            'mongo': MongoDBJobStore(),
            'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
            }
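The listing is cut off at this point; in the APScheduler user guide this configuration continues, roughly, by defining executors and job defaults and then constructing the scheduler, the same pattern as the sbb.py example above (all names here come from the imports a few lines up):

executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}
job_defaults = {
    'coalesce': False,
    'max_instances': 3
}
scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors,
                                job_defaults=job_defaults, timezone=utc)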
Example #8
def create_app():
    app = Flask(__name__)
    app.config.from_object(CONFIG_CLASS)

    init_logging()

    socketio = SocketIO(app, cors_allowed_origins="*")

    if os.getenv("FLASK_ENV") == "development":
        app = register_teardown_request(app)

    # read directory mount based config into Flask config
    try:
        conf_data = get_user_conf()
        app.config.update(conf_data)
    except Exception:
        app.logger.warning("Failed to load config.json")

    app.config["ORCHEST_REPO_TAG"] = get_repo_tag()

    # create thread for non-cpu bound background tasks, e.g. requests
    scheduler = BackgroundScheduler(
        job_defaults={
            # Infinite amount of grace time, so that if a task cannot be
            # instantly executed (e.g. if the webserver is busy) then it
            # will eventually be.
            "misfire_grace_time": 2**31,
            "coalesce": False,
            # So that the same job can be in the queue an infinite
            # amount of times, e.g. for concurrent requests issuing the
            # same tasks.
            "max_instances": 2**31,
        })
    app.config["SCHEDULER"] = scheduler
    scheduler.start()

    app.logger.info("Flask CONFIG: %s" % app.config)

    # Create the database if it does not exist yet. Roughly equal to a
    # "CREATE DATABASE IF NOT EXISTS <db_name>" call.
    if not database_exists(app.config["SQLALCHEMY_DATABASE_URI"]):
        create_database(app.config["SQLALCHEMY_DATABASE_URI"])
    db.init_app(app)
    ma.init_app(app)
    # necessary for migration
    Migrate().init_app(app, db)

    with app.app_context():

        # Alembic does not support calling upgrade() concurrently
        if not is_werkzeug_parent():
            # Upgrade to the latest revision. This also takes care of
            # bringing an "empty" db (no tables) on par.
            try:
                upgrade()
            except Exception as e:
                logging.error("Failed to run upgrade() %s [%s]" % (e, type(e)))

            initialize_default_datasources(db, app)

    # Telemetry
    if not app.config["TELEMETRY_DISABLED"]:
        # initialize posthog
        posthog.api_key = base64.b64decode(
            app.config["POSTHOG_API_KEY"]).decode()
        posthog.host = app.config["POSTHOG_HOST"]

        # send a ping now
        analytics_ping(app)

        # and every 15 minutes
        scheduler.add_job(
            analytics_ping,
            "interval",
            minutes=app.config["TELEMETRY_INTERVAL"],
            args=[app],
        )

    # static file serving
    @app.route("/public/<path:path>")
    def send_files(path):
        return send_from_directory("../static", path)

    register_views(app, db)
    register_orchest_api_views(app, db)
    register_background_tasks_view(app, db)
    register_socketio_broadcast(db, socketio)
    register_analytics_views(app, db)

    processes = []

    if not is_werkzeug_parent():

        file_dir = os.path.dirname(os.path.realpath(__file__))

        # log_streamer process
        log_streamer_process = Popen(
            ["python3", "-m", "scripts.log_streamer"],
            cwd=os.path.join(file_dir, ".."),
            stderr=subprocess.STDOUT,
        )

        app.logger.info("Started log_streamer.py")
        processes.append(log_streamer_process)

    return app, socketio, processes
Example #9
from flask import Flask
from flask_cors import CORS
from apscheduler.schedulers.background import BackgroundScheduler
import firebase_admin
from firebase_admin import credentials, firestore
import requests
import time
import re
import os
import math
import decimal
from datetime import timedelta, date, datetime

app = Flask(__name__)
CORS(app)

# Background Scheduler
cron = BackgroundScheduler()

# Database
cred = credentials.Certificate('serviceAccountKey.json')
firebase_admin.initialize_app(cred)


class Site_Data:
    def __init__(self, url):
        self.url = url
        self.get_site()

    def process_article(self):
        # Get Schema
        try:
            schemaSearch = self.soup.find('script',
Example #10
import json
import tweepy
import logging
from queue import Queue
from telegram import Bot
from botInfo import tg_id
from botMarkov import gen_model
from botTools import query_token
from telegram.ext import Dispatcher
from apscheduler.schedulers.background import BackgroundScheduler


# misfire_grace_time is a job option, so it belongs in job_defaults rather than
# being passed directly to the scheduler
scheduler = BackgroundScheduler(job_defaults={'misfire_grace_time': 60})

twitter_token = json.loads(query_token('luxun'))
token_auth = tweepy.OAuthHandler(twitter_token['consumer_key'], twitter_token['consumer_secret'])
token_auth.set_access_token(twitter_token['access_token'], twitter_token['access_token_secret'])
lx_twi = tweepy.API(token_auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

lx_model = gen_model()

lx_tg = Bot(query_token(tg_id))
update_queue = Queue()
dp = Dispatcher(lx_tg, update_queue, use_context=True)

logger = logging.getLogger('werkzeug')
logger.setLevel(logging.INFO)
Example #11
def Initialize(cfg_path: str, main_path: str):
    """
COS 模块初始化,此函数应在所有函数之前调用
    :param cfg_path: 配置文件地址。
    :param main_path: 程序主目录地址。
    """
    global cf
    cf = configparser.ConfigParser()
    cf.read(cfg_path)

    # Set the user attributes: secret_id, secret_key, region
    # appid has been removed from the config; include it in the Bucket parameter instead (Bucket is formed as bucketname-appid)
    global secret_id, secret_key, region, token, config, client, bucket, cache_time
    try:
        secret_id = str(cf.get("COS", "secret_id"))  # 替换为用户的secret_id
        secret_key = str(cf.get("COS", "secret_key"))  # 替换为用户的secret_key
        region = str(cf.get("COS", "region"))  # 替换为用户的region
        token = None  # 使用临时秘钥需要传入Token,默认为空,可不填
        bucket = str(cf.get("COS", "bucket"))
        cache_time_str = str(cf.get("COS",
                                    "cache_time"))  # 本地缓存清理时间,以天为计,若参数出错默认为3
        if cache_time_str.isdigit():
            cache_time = int(cache_time_str)
        else:
            cache_time = 3
        print("[COS]secret_id:", secret_id)
        print("[COS]secret_key:", secret_key)
        print("[COS]region:", region)
        print("[COS]bucket:", bucket)
        print("[COS]cache_time:{}".format(cache_time))
        config = CosConfig(Region=region,
                           SecretId=secret_id,
                           SecretKey=secret_key,
                           Token=token)  # build the client configuration object
        client = CosS3Client(config)
    except Exception as e:
        log_cos.error("UnknownError: %s", e)
        print("UnknownError:", e)
        log_cos.info("Program Ended")
        sys.exit()
    global Main_filepath
    Main_filepath = main_path
    # print(Main_filepath)
    global server_mode
    server_mode = online_check()
    if server_mode:
        cf.set("COS", "server_mode", "1")
        print("[COS]Online Server is available")
        log_cos.info("Cos Server is available")
    else:
        cf.set("COS", "server_mode", "0")
        os.makedirs(os.path.join(Main_filepath, "data", "local", "portrait"),
                    exist_ok=True)
        os.makedirs(os.path.join(Main_filepath, "data", "local", "article"),
                    exist_ok=True)
        print("[COS]Online Server is unavailable , using local storage server")
        log_cos.warning("Cos is unavailable , using local storage mode")

    # TODO: handle the different server_mode cases next

    # Initialize the cache-cleanup schedule
    global scheduler
    jobstores = {
        'redis': RedisJobStore(0, host="localhost", port=6379, password=""),
        'default': SQLAlchemyJobStore(url='sqlite:///data/jobs.sqlite')
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    # BackgroundScheduler: appropriate whenever the scheduler should run in the background of the application.
    scheduler = BackgroundScheduler(jobstores=jobstores,
                                    job_defaults=job_defaults,
                                    daemonic=False)
    scheduler.remove_all_jobs()
    scheduler.add_job(Auto_DelLocalCache, 'interval', days=cache_time)
    ### interval options: weeks, days, hours, minutes, seconds, start_date, end_date, timezone, jitter (shifts each run earlier or later by at most the given number of seconds)
    scheduler.start()
    print("[COS]Start Background schedule")

    log_cos.info("Module COS loaded")
Example #12
import sys
import taskscheduler as tsc
from apscheduler.schedulers.background import BackgroundScheduler
import time
import os

PidFile = r'C:\Users\jgurry\ProjectCode\PythonProjects\Omitron Interview\PIDs.txt'
FileName = sys.argv[1]
TaskLocation = sys.argv[2]
TaskInterval = int(sys.argv[3])
TaskName = sys.argv[4]
Identifier = TaskInterval

# sheduling task run
sched = BackgroundScheduler(daemon=True)  #configuring the scheduler
sched.add_job(tsc.check_and_run_int,
              'interval',
              seconds=TaskInterval,
              id='myJobId',
              args=[FileName, TaskInterval, TaskLocation, TaskName])
sched.start()  # starts the scheduler

TaskPid = os.getpid()
tsc.write_pid(TaskPid, TaskName, Identifier, PidFile)
while True:
    time.sleep(1)
Example #13
    def __init__(self,
                 token: str,
                 redis: str = "redis://127.0.0.1:6379/0",
                 superadmin: int = None,
                 bosses_file: str = None,
                 bosses_expiration: int = 12,
                 gyms_file: str = None,
                 gyms_expiration: int = 12,
                 debug_folder: str = None):
        # Init and test redis connection
        self._redis = StrictRedis.from_url(url=redis,
                                           charset="utf-8",
                                           decode_responses=False)

        _LOGGER.info("Try to connect to Redis...")
        try:
            self._redis.ping()
        except exceptions.ConnectionError:
            _LOGGER.critical("Unable to connect to Redis")
            sys.exit()
        _LOGGER.info("Successfully connected to Redis")

        # Save superadmin
        self._superadmin = int(superadmin) if superadmin is not None else None
        # Add superadmin to the admins db
        if self._superadmin is not None:
            self._redis.set(redis_keys.SUPERADMIN, self._superadmin)
            self._redis.sadd(redis_keys.ADMIN, self._superadmin)

        # Save debug folder
        self._debug_folder = debug_folder
        if self._debug_folder is not None:
            self._debug_folder = os.path.abspath(debug_folder)
            ScreenshotRaid.debug = True
            _LOGGER.info("\"{}\" was set as debug folder".format(
                self._debug_folder))

        # Init the bot
        self._bot = Bot(token)

        # Init updater
        self._updater = Updater(bot=self._bot, use_context=True)

        # Get the id of the bot
        self._id = self._bot.get_me().id

        # Set the handler functions
        # Set the handler for screens
        self._updater.dispatcher.add_handler(
            MessageHandler(Filters.photo, self._handler_screenshot))
        # Set the handler to set the hangout
        self._updater.dispatcher.add_handler(
            MessageHandler(
                Filters.reply
                & Filters.regex(r"^\s*[0-2]?[0-9][:.,][0-5]?[0-9]\s*$"),
                self._handler_set_hangout))
        # Set the handler for the buttons
        self._updater.dispatcher.add_handler(
            CallbackQueryHandler(self._handler_buttons))
        # Set the handler for the pinned message notify
        self._updater.dispatcher.add_handler(
            MessageHandler(Filters.status_update.pinned_message,
                           self._handler_event_pinned))
        # Set the handler to set the boss
        self._updater.dispatcher.add_handler(
            MessageHandler(Filters.reply & Filters.regex(r"^\s*[a-zA-Z]+\s*$"),
                           self._handler_set_boss))

        # Set the handler for about commands
        self._updater.dispatcher.add_handler(
            CommandHandler("start", self._handler_command_about))
        self._updater.dispatcher.add_handler(
            CommandHandler("about", self._handler_command_about))
        # Set the handler for scan command
        self._updater.dispatcher.add_handler(
            CommandHandler("scan", self._handler_command_scan))
        # Set the handler for enablechat command
        self._updater.dispatcher.add_handler(
            CommandHandler("enablechat", self._handler_command_enablechat))
        # Set the handler for disablechat command
        self._updater.dispatcher.add_handler(
            CommandHandler("disablechat", self._handler_command_disablechat))
        # Set the handler for enablescan command
        self._updater.dispatcher.add_handler(
            CommandHandler("enablescan", self._handler_command_enablescan))
        # Set the handler for disablescan command
        self._updater.dispatcher.add_handler(
            CommandHandler("disablescan", self._handler_command_disablescan))
        # Set the handler for addadmin command
        self._updater.dispatcher.add_handler(
            CommandHandler("addadmin", self._handler_command_addadmin,
                           Filters.reply))
        # Set the handler for removeadmin command
        self._updater.dispatcher.add_handler(
            CommandHandler("removeadmin", self._handler_command_removeadmin,
                           Filters.reply))
        # Set the handler for check command
        self._updater.dispatcher.add_handler(
            CommandHandler("check", self._handler_command_check,
                           Filters.reply))

        # Set the handler for the errors
        self._updater.dispatcher.add_error_handler(self._handler_error)

        # Creates background scheduler for update the db
        self._scheduler = BackgroundScheduler(daemon=True)

        # Creates job to update bosses list
        if bosses_file is not None:
            bosses.load_from(bosses_file)
            self._scheduler.add_job(lambda: bosses.load_from(bosses_file),
                                    'interval',
                                    hours=int(bosses_expiration))

        # Creates job to update gyms list
        if gyms_file is not None:
            gyms.load_from(gyms_file)
            self._scheduler.add_job(lambda: gyms.load_from(gyms_file),
                                    'interval',
                                    hours=int(gyms_expiration))

        # Starts the scheduler
        self._scheduler.start()

        _LOGGER.info("Bot ready")
Example #14
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_job(myFunc, 'interval', minutes=1)
    scheduler.start()
Example #15
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_job(VideoDataFetchHandler().fetch_and_insert_video_data,
                      'interval',
                      seconds=10)
    scheduler.start()
Example #16
})

ldap_client = Server(environ.get("LDAP_SERVER"),
                     get_info=ALL) if USE_LDAP else None

login_manager = LoginManager()
login_manager.session_protection = "strong"

mail_client = Mail()

scheduler = BackgroundScheduler({
    "apscheduler.jobstores.default": {
        "type": "sqlalchemy",
        "url": "sqlite:///jobs.sqlite",
    },
    "apscheduler.executors.default": {
        "class": "apscheduler.executors.pool:ThreadPoolExecutor",
        "max_workers": "50",
    },
    "apscheduler.job_defaults.misfire_grace_time": "5",
    "apscheduler.job_defaults.coalesce": "true",
    "apscheduler.job_defaults.max_instances": "3",
})
scheduler.start()

tacacs_client = (TACACSClient(environ.get("TACACS_ADDR"), 49,
                              environ.get("TACACS_PASSWORD"))
                 if USE_TACACS else None)

vault_client = VaultClient()
Example #17
import json
import os
import random
import requests
from apscheduler.schedulers.background import BackgroundScheduler

from Compost import app

# The "apscheduler." prefix is hard coded
scheduler = BackgroundScheduler({
    'apscheduler.jobstores.default': {
        'type': 'sqlalchemy',
        'url': 'sqlite:///' + app.root_path + os.sep + 'example1.sqlite'
    },
    'apscheduler.executors.default': {
        'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
        'max_workers': '1'
    },
    'apscheduler.job_defaults.coalesce': 'false',
    'apscheduler.job_defaults.max_instances': '1',
    'apscheduler.timezone': 'Europe/Athens',
})

# def initmeas():
# url1 = sys.argv[1] if len(sys.argv) > 1 else 'sqlite:///' + app.root_path + os.sep + 'example1.sqlite'
# scheduler.add_jobstore('sqlalchemy', url=url1)


def add_measurements():
    m_type = [
        "soil_temp", "soil_hum", "air_temp", "air_hum", "outside_temp",
Example #18
                        stype=stype,
                        query='-'.join(clean_text(query.split('-')))))
        except Exception:  # job not in queue
            return redirect(
                url_for('index.loading',
                        stype=stype,
                        query='-'.join(clean_text(query.split('-')))))


@bp.route('/results/<stype>/<query>', methods=['GET'])
def check_results(stype, query):
    try:
        job = Job.fetch(stype + '/' + query, connection=conn)
    except Exception:
        return "nay", 202
    if job.is_finished:
        return "job done", 200
    return "nay", 202


sched = BackgroundScheduler({'apscheduler.timezone': 'America/Toronto'},
                            daemon=True)


@sched.scheduled_job('interval', hours=12)
def timed_update():
    job = q.enqueue_call(get_recent,
                         result_ttl=0,
                         failure_ttl=0,
                         job_id="check_recent")
Example #19
#  You should have received a copy of the GNU General Public License
#  along with Tautulli.  If not, see <http://www.gnu.org/licenses/>.

import os
import time

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger

import plexpy
import database
import logger
import newsletters


NEWSLETTER_SCHED = BackgroundScheduler()


def add_newsletter_each(newsletter_id=None, notify_action=None, **kwargs):
    if not notify_action:
        logger.debug(u"Tautulli NewsletterHandler :: Notify called but no action received.")
        return

    data = {'newsletter': True,
            'newsletter_id': newsletter_id,
            'notify_action': notify_action}
    data.update(kwargs)
    plexpy.NOTIFY_QUEUE.put(data)


def schedule_newsletters(newsletter_id=None):
Example #20
from flask import Flask
from apscheduler.schedulers.background import BackgroundScheduler
import os
import platform

app = Flask(__name__)
app.config.from_object('config')

from app import views

if platform.system() == 'Linux':
    if not os.path.exists(os.getcwd() + '/' + app.config['DATABASE']):
        views.init_db()
elif platform.system() == 'Windows':
    if not os.path.exists(os.getcwd() + '\\' +
                          app.config['DATABASE'].replace('/', '\\')):
        views.init_db()
else:
    views.init_db()

sched = BackgroundScheduler(timezone='Europe/Moscow')
sched.add_job(views.my_scheduled_job, 'interval', hours=1)
sched.start()
Example #21
 def __init__(self):
     # self.scheduler = BackgroundScheduler(daemonic = True) # daemonic = False,
     # self.scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=pytz.timezone('Asia/Shanghai'))
     self.scheduler = BackgroundScheduler(APS_SCHEDULER_CONFIG)
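APS_SCHEDULER_CONFIG is not shown in this excerpt; judging by the commented-out alternatives above, it would be a gconfig-style options dict passed positionally, for example (an assumed sketch, not the project's actual values):

APS_SCHEDULER_CONFIG = {
    'apscheduler.timezone': 'Asia/Shanghai',
    'apscheduler.job_defaults.coalesce': 'false',
    'apscheduler.job_defaults.max_instances': '3',
}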
Example #22
class DccChannel(object):
    # Class constants:
    FORWARDS = 1
    BACKWARDS = -1

    MAX_ADDR = 30
    assert MAX_ADDR < 127
    BYTES_PER_CHANNEL = 16

    # Class attributes:
    scheduler = BackgroundScheduler()
    run_buffer = DmaBuffer(MAX_ADDR * BYTES_PER_CHANNEL, 0)

    # Phase Two of class definition (by which stage the class object has been created)
    @classmethod
    def __phase2__(clazz):
        # The singleton status of *all* channels
        clazz._channels = [{
            "throttle": 0,
            "speedAdjusterJob": clazz.scheduler.add_job(
                clazz._adjustSpeeds, "interval", seconds=1,
                args=[addr]).pause(),
        } for addr in range(clazz.MAX_ADDR)]
        # The array starts at index 0 (duh) which is not a drivable channel,
        # but it can be safely ignored

        atexit.register(lambda: clazz.scheduler.shutdown(wait=False))
        clazz.scheduler.start()

    @staticmethod
    def peek_instruction(buf, fromAddr):
        offset = fromAddr * DccChannel.BYTES_PER_CHANNEL
        instBytes = buf[offset:offset + DccChannel.BYTES_PER_CHANNEL]
        (instAddr, instType, (direction, speed)) = dcclib.decode_instruction(
            dcclib.decode_stream(
                dcclib.decode_signal(dcclib.to_binary_array(instBytes))))
        assert instAddr == fromAddr
        assert instType == "SPEED"
        assert direction in [True, False]
        assert speed >= 0
        assert speed <= 28
        return (direction, speed)

    @staticmethod
    def poke_instruction(buf, addr, instruction):
        offset = addr * DccChannel.BYTES_PER_CHANNEL
        bytes_to_poke = dcclib.to_bytes(
            dcclib.round_up(dcclib.to_signal(dcclib.to_stream(instruction))))
        assert len(bytes_to_poke) <= DccChannel.BYTES_PER_CHANNEL
        buf[offset:offset + len(bytes_to_poke)] = bytes_to_poke

    def __init__(self, addr):
        # Instance attributes:
        self._addr = int(addr)
        assert self._valid()

    def __repr__(self):
        return "DccChannel[addr=%s,%d%s]" % (self.addr, self.speed,
                                             ">" if self.direction
                                             == DccChannel.FORWARDS else "<")

    def _valid(self):
        assert self._addr > 0
        assert self._addr <= DccChannel.MAX_ADDR
        assert self._channels[self._addr]["throttle"] >= 0
        assert self._channels[self._addr]["throttle"] <= 28
        return True

    @property
    def addr(self):
        assert self._valid()
        return self._addr

    @property
    def direction(self):
        assert self._valid()
        (direction,
         speed) = DccChannel.peek_instruction(DccChannel.run_buffer,
                                              self._addr)
        return DccChannel.FORWARDS if direction else DccChannel.BACKWARDS

    @direction.setter
    def direction(self, direction):
        assert self._valid()
        assert direction in [DccChannel.FORWARDS,
                             DccChannel.BACKWARDS], str(direction)
        (prevDirection,
         speed) = DccChannel.peek_instruction(DccChannel.run_buffer,
                                              self._addr)
        if (speed == 0):
            DccChannel.poke_instruction(
                DccChannel.run_buffer, self._addr,
                dcclib.speed_instruction(self._addr,
                                         direction == DccChannel.FORWARDS, 0))

    @property
    def speed(self):
        assert self._valid()
        (direction,
         speed) = DccChannel.peek_instruction(DccChannel.run_buffer,
                                              self._addr)
        return speed

    @speed.setter
    def speed(self, speed):
        assert self._valid()
        assert speed >= 0
        assert speed <= 28
        (direction,
         prevSpeed) = DccChannel.peek_instruction(DccChannel.run_buffer,
                                                  self._addr)
        DccChannel.poke_instruction(
            DccChannel.run_buffer, self._addr,
            dcclib.speed_instruction(self._addr, direction, speed))

    def _setSpeed(self, speed):
        (direction,
         prevSpeed) = DccChannel.peek_instruction(DccChannel.run_buffer,
                                                  self._addr)
        DccChannel.poke_instruction(
            DccChannel.run_buffer, self._addr,
            dcclib.speed_instruction(self._addr, direction, speed))

    @property
    def throttle(self):
        assert self._valid()
        return self._channels[self._addr]["throttle"]

    @throttle.setter
    def throttle(self, throttle):
        assert self._valid()
        assert throttle >= 0
        assert throttle <= 28
        self._setSpeedAdjuster(throttle)
        self._channels[self._addr]["throttle"] = throttle

    def _setSpeedAdjuster(self, destinationSpeed):
        # How does the required speed compare to the current speed...
        currentSpeed = self.speed

        # Work out how to adjust the speed, if at all
        requiredAdjustment = destinationSpeed - currentSpeed
        # Also work out the current adjustment that's being applied
        currentAdjustment = self.throttle - currentSpeed

        if requiredAdjustment == 0:
            # We're now going at the desired speed
            # Scheduled callback will handle this and tidy up when it next runs
            return

        # Compare the signs on the adjustments, by seeing if they cancel when multiplied
        posNegOrZero = requiredAdjustment * currentAdjustment
        if posNegOrZero > 0:
            # Not yet going at the right speed, but the required speed
            # is in the same "direction" as the current target speed
            # No need to do anything - existing schedule will
            # continue to work for us
            return

        # Either we're starting to move, or we're changing between accelerating and decelerating
        self._channels[self._addr]["speedAdjusterJob"].reschedule("interval",
                                                                  seconds=0.2)

    @classmethod
    def _adjustSpeeds(clazz, addr):
        self = DccChannel(addr)
        currentSpeed = self.speed
        destinationSpeed = self._channels[self._addr]["throttle"]
        # Work out how to adjust the speed, if at all
        adjustment = sign(destinationSpeed - currentSpeed)  # sign() assumed defined elsewhere, e.g. (x > 0) - (x < 0)
        # If speed needs adjusting, adjust it
        if adjustment != 0:
            currentSpeed = currentSpeed + adjustment
            self.speed = currentSpeed
        # If now going at the right speed, pause the speedAdjuster
        if currentSpeed == destinationSpeed:
            self._channels[self._addr]["speedAdjusterJob"].pause()
Example #23
def init_scheduler():
    sched = BackgroundScheduler(daemon=True)
    sched.add_job(populate_stats, 'interval',
                  seconds=time_interval)
    sched.start()
Example #24
import datetime
import time
from apscheduler.schedulers.background import BackgroundScheduler

plant = PlantBoxSetup()
plant.addPin("P0", "moist_sensor", 'i', range=[1.43, 2.79])
plant.addPin(16, "servo_pump", 'o', servo=True)

#consumer key, consumer secret, access token, access secret.
ckey=""
csecret=""
atoken=""
asecret=""

twitter = Twitter(ckey, csecret, atoken, asecret, plant)

sched = BackgroundScheduler({'apscheduler.timezone':  'America/Denver'})

def plantCheck():
    twitterRunTime = 0
    while True:  # note: this endless loop never returns, so a scheduler job running it executes only once
        plant.update()
        print("Soil Moisture Sensor: " + str(plant.getPinInfo("P0")['data'])+"%")
        if plant.getPinInfo("P0")['data'] < 85:
            print("Plant needs water, Turning Pump on for 5 seconds...")
            plant.outputToggle(16)
            time.sleep(2.5)
            print("Pump Off...")
            plant.outputToggle(16)
            twitter.tweet("Just watered.\nAt "+twitter.getFormattedTime())
        twitterRunTime = twitterRunTime + 1
        if twitterRunTime > 100:
Example #25
 def initialize():
     log.info("Initializing clean-up task")
     scheduler = BackgroundScheduler()
     scheduler.start()
     scheduler.add_job(cleanup_database, 'interval', hours=12)
Example #26
import logging
logging.basicConfig()
from main.MU_update import MU_UpdateData, PrepareLogin
from main.MU_utils import r
import socket
import sys
reload(sys)  # Python 2 only: required before sys.setdefaultencoding
sys.setdefaultencoding('utf8')  # Python 2 only; this API was removed in Python 3
jobstores = {'redis': RedisJobStore()}
executors = {
    'default': ThreadPoolExecutor(20),
    'processpool': ProcessPoolExecutor(5)
}
job_defaults = {'coalesce': False, 'max_instances': 3}
scheduler = BackgroundScheduler(jobstores=jobstores,
                                executors=executors,
                                job_defaults=job_defaults,
                                daemonic=False)


def push():
    update = MU_UpdateData()
    PrepareLogin()
    update.PostItem()


def check_update():
    update = MU_UpdateData()
    update.SaveRecentChanges()
    update.GetItemToSend()

Example #27
                                                   current_price["시가"], "2",
                                                   "00")
            buy_order = buy_order_list[0]
            buy_order["amoun"] = 2
            mongo.insert_item(buy_order, "stocklab_test", "order")
            sell_order_list = ebest_ace.order_stock(code, "1",
                                                    current_price["종가"], "1",
                                                    "00")
            sell_order = sell_order_list[0]
            sell_order["amount"] = 1
            mongo.insert_item(sell_order, "stocklab_test", "order")
        tick += 1


if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    codes = ["180640", "005930"]
    day = datetime.now() - timedelta(days=4)
    day = day.strftime("%Y%m%d")
    print(day)
    # a 'date' trigger with run_date=now fires the scenario once, immediately
    scheduler.add_job(func=run_process_trading_scenario,
                      trigger="date",
                      run_date=datetime.now(),
                      id="test",
                      kwargs={
                          "code_list": codes,
                          "date": day
                      })
    scheduler.start()

    while True:
        time.sleep(1)  # keep the main thread alive (assumed continuation; the listing is truncated here)
Example #28
CURRENT_VERSION = None
LATEST_VERSION = None
COMMITS_BEHIND = None
LOCAL_IP = None
DOWNLOAD_APIKEY = None
APILOCK = False
SEARCHLOCK = False
CMTAGGER_PATH = None
STATIC_COMICRN_VERSION = "1.01"
STATIC_APC_VERSION = "2.04"
SAB_PARAMS = None
COMICINFO = []
SCHED = BackgroundScheduler({
                             'apscheduler.executors.default': {
                                 'class':  'apscheduler.executors.pool:ThreadPoolExecutor',
                                 'max_workers': '20'
                             },
                             'apscheduler.job_defaults.coalesce': 'true',
                             'apscheduler.job_defaults.max_instances': '3',
                             'apscheduler.timezone': 'UTC'})



def initialize(config_file):
    with INIT_LOCK:

        global CONFIG, _INITIALIZED, QUIET, CONFIG_FILE, OS_DETECT, MAINTENANCE, CURRENT_VERSION, LATEST_VERSION, COMMITS_BEHIND, INSTALL_TYPE, IMPORTLOCK, PULLBYFILE, INKDROPS_32P, \
               DONATEBUTTON, CURRENT_WEEKNUMBER, CURRENT_YEAR, UMASK, USER_AGENT, SNATCHED_QUEUE, NZB_QUEUE, PP_QUEUE, SEARCH_QUEUE, PULLNEW, COMICSORT, WANTED_TAB_OFF, CV_HEADERS, \
               IMPORTBUTTON, IMPORT_FILES, IMPORT_TOTALFILES, IMPORT_CID_COUNT, IMPORT_PARSED_COUNT, IMPORT_FAILURE_COUNT, CHECKENABLED, CVURL, DEMURL, WWTURL, \
               USE_SABNZBD, USE_NZBGET, USE_BLACKHOLE, USE_RTORRENT, USE_UTORRENT, USE_QBITTORRENT, USE_DELUGE, USE_TRANSMISSION, USE_WATCHDIR, SAB_PARAMS, \
               PROG_DIR, DATA_DIR, CMTAGGER_PATH, DOWNLOAD_APIKEY, LOCAL_IP, STATIC_COMICRN_VERSION, STATIC_APC_VERSION, KEYS_32P, AUTHKEY_32P, FEED_32P, FEEDINFO_32P, \
               MONITOR_STATUS, SEARCH_STATUS, RSS_STATUS, WEEKLY_STATUS, VERSION_STATUS, UPDATER_STATUS, DBUPDATE_INTERVAL, LOG_LANG, LOG_CHARSET, APILOCK, SEARCHLOCK, LOG_LEVEL, \
Example #29
	  tempo          = 'hourly'
	) # end send mail
# end def

def send_daily_emails():
	return alerter_server.send_alerts_email.main(
	  conn_str       = db_conn_str,
	  email_api_key  = email_api_key,
	  email_provider = email_provider,
	  sender_email   = sender_email,
	  tempo          = 'daily'
	) # end send mail
# end def

# init a scheduler
scheduler = BackgroundScheduler({'apscheduler.timezone': timezone})
scheduler.start()
# - add hourly job
scheduler.add_job(send_hourly_emails, CronTrigger.from_crontab('0 * * * *'))
# - add daily job
scheduler.add_job(send_daily_emails,  CronTrigger.from_crontab('0 18 * * *')) # TODO: make configurable


# init engine
engine = sqlalchemy.create_engine(db_conn_str)
controller = alertController(db_conn_str)
Session = sessionmaker(bind=engine)

def put_db(inputs):
	tblname = inputs.pop('tblname')
	acceptables = ['title', 'msg', 'channel', 'alert_uuid', 'is_processed', 'processed_at', 'tempo']
Example #30
 def init():
     if not ScheduleManager.base_scheduler:
         ScheduleManager.base_scheduler = BackgroundScheduler(daemon=True)
         ScheduleManager.base_scheduler.start()
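A short usage sketch for this singleton pattern (some_func is illustrative, not from the original snippet):

ScheduleManager.init()
# All callers share the single base_scheduler instance
ScheduleManager.base_scheduler.add_job(some_func, 'interval', minutes=5)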