Example #1
def main():
    # Clean
    logger.info("Clean memcached before init")
    memcached_host, memcached_port = get_memcached_config()
    mem_nfv = MemcachedNFV(memcached_host, memcached_port)
    mem_nfv.clean_memcached()
    mem_nfv.disconnect()
    # Init server list
    init_server_cached_list_api()

    # Schedule the periodic statistics job
    data_config = read_config_json_file(NFV_CONFIG_PATH)
    interval = int(data_config['interval'])

    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = BlockingScheduler()
    scheduler.configure(executors=executors, job_defaults=job_defaults)
    scheduler.add_job(scheduler_get_statistic_job,
                      'interval',
                      seconds=interval)
    scheduler.start()
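The executor/job_defaults wiring above is standard APScheduler; since the memcached helpers are project-specific, here is a minimal self-contained sketch of the same configure() pattern, with a hypothetical tick job and a 5-second interval standing in for the real statistics job.

from apscheduler.executors.pool import ProcessPoolExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

executors = {
    'default': {'type': 'threadpool', 'max_workers': 20},
    'processpool': ProcessPoolExecutor(max_workers=5),
}
job_defaults = {'coalesce': False, 'max_instances': 3}

def tick():  # stand-in for scheduler_get_statistic_job
    print('tick')

scheduler = BlockingScheduler()
scheduler.configure(executors=executors, job_defaults=job_defaults)
scheduler.add_job(tick, 'interval', seconds=5)
scheduler.start()  # blocks until interrupted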
Example #2
def __init__(self, onTime, offTime):

        self.onTime = onTime
        self.offTime = offTime
        self.updateInterval = 6
        self.webPush = False
        self.relayStatesA = []
        self.relayStatesD = {}
        logging.basicConfig()
        # Call fetchUpdate every 6 hours

        print('initializing AIHome... scheduling job')
        sched = BlockingScheduler()

        @sched.scheduled_job('interval', hours=self.updateInterval)
        def timed_job():
            print('This job runs every 6 hrs and calls fetchUpdate.')
            # call fetchUpdate()
            self.fetchUpdate()

        # RUNS AT DAY & TIME SPECIFIED ########################
        #@sched.scheduled_job('cron', day_of_week='mon-fri', hour=12)
        #def scheduled_job():
        #    print('This job is run every weekday at 12pm.')
        #######################################################

        sched.configure()
        #options_from_ini_file
        sched.start()
Example #3
def main():
    scheduler = BlockingScheduler()
    scheduler.configure(job_defaults=dict(max_instances=1, coalesce=True))
    # scheduler.add_job(refresh_job, 'interval', minutes=1)
    scheduler.add_job(refresh_job, 'cron', day_of_week=1, hour=4, minute=30)
    scheduler.add_job(case_status_job, 'cron', day_of_week=1, hour=16)
    scheduler.add_job(smart_job, 'cron', day_of_week=3, hour=0)
    scheduler.add_job(smart_date_job, 'cron', day_of_week=5, hour=8)
    scheduler.start()
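Note that APScheduler's cron trigger counts day_of_week from 0 = Monday (unlike classic cron, where 0 is Sunday), so day_of_week=1 above fires on Tuesday. Named days make the intent explicit; a sketch of the first cron job rewritten that way:

# equivalent to day_of_week=1 above: 0 = Monday in APScheduler's cron trigger
scheduler.add_job(refresh_job, 'cron', day_of_week='tue', hour=4, minute=30)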
Example #4
def run_scheduler():
    log = LogHelper("scheduler")
    log.info("scheduler start")

    scheduler = BlockingScheduler(logger=log)

    scheduler.add_job(run_proxy_fetch, 'interval', minutes=4, id="proxy_fetch", name="proxy fetch")
    scheduler.add_job(run_proxy_check, 'interval', minutes=2, id="proxy_check", name="proxy check")

    executors = {
        'default': {'type': 'threadpool', 'max_workers': 20},
        'processpool': ProcessPoolExecutor(max_workers=4)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 10
    }

    scheduler.configure(executors=executors, job_defaults=job_defaults)
    scheduler.start()
Example #5
class SchedulerManager:
    def __init__(self):
        if config.ENVIRONMENT == "test":
            self.scheduler = BackgroundScheduler()
        else:
            self.scheduler = BlockingScheduler()

    def init(self, websites, func):
        self.scheduler.configure(timezone="utc")
        for website in websites:
            self.scheduler.add_job(
                func,
                "interval",
                seconds=website.get("interval"),
                id=website.get("url"),
                kwargs={"url": website.get("url"), "regexp_rules": website.get("regexp_rules")},
            )

        self.scheduler.start()

    def get_jobs(self):
        return self.scheduler.get_jobs()
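A hypothetical driver for SchedulerManager (it assumes the config module it reads ENVIRONMENT from is importable); the website entry and the check() callback are illustrative, not part of the original snippet.

websites = [
    {"url": "https://example.com", "interval": 30, "regexp_rules": []},
]

def check(url, regexp_rules):  # hypothetical monitoring callback
    print("checking", url)

manager = SchedulerManager()
manager.init(websites, check)  # blocks here when BlockingScheduler is used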
Example #6
    data.next_run_at = datetime.datetime.now() + datetime.timedelta(hours=4)
    d = data.next_run_at
    writeDatefile(data)
    sched.add_job(main, trigger='date', id='cron_main_at_%s' %
                  (d.isoformat()), run_date=data.next_run_at)


def start():
    sched.start()


executors = {
    'default': {'type': 'threadpool', 'max_workers': 20},
    'processpool': ProcessPoolExecutor(max_workers=5)
}
sched.configure(executors=executors)
if __name__ == "__main__":
    t = threading.Thread(target=start, args=())
    t.start()
    next_run_at = readDatefile('next_run_at')
    if Config.getboolean("bot", "startonstartup"):
        next_run_at = datetime.datetime.now() + datetime.timedelta(seconds=1)
    elif next_run_at is None:
        next_run_at = datetime.datetime.now() + datetime.timedelta(seconds=5)
    elif datetime.datetime.now() > next_run_at:
        next_run_at = datetime.datetime.now() + datetime.timedelta(seconds=5)
    else:
        nextAt = next_run_at - datetime.datetime.now()
        next_run_at = (datetime.datetime.now()
                       + datetime.timedelta(seconds=nextAt.total_seconds()))
    sched.add_job(main, trigger='date', id='cron_main_at_%s' %
                  (next_run_at.isoformat()), run_date=next_run_at)
Example #7

def periodic_task_one():
    print('periodic_task_one on {}'.format(datetime.now()))
    with ThreadPoolExecutor(max_workers=100) as executor:
        future_dict = {}
        for i in range(0, 20):
            future = executor.submit(task_one, i, i * 10, i * 100)
            future_dict[future] = i

        for f in as_completed(future_dict):
            LOG.info('Index: {}, result: {}'.format(str(future_dict[f]),
                                                    str(f.result())))


def periodic_task_two():
    LOG.info('periodic_task_two on {}'.format(datetime.now()))


def periodic_task_three():
    LOG.info('periodic_task_three on {}'.format(datetime.now()))


executors = {'default': ProcessPoolExecutor(max_workers=3)}
scheduler = BlockingScheduler()
scheduler.configure(executors=executors)

scheduler.add_job(periodic_task_one, 'interval', seconds=2)
scheduler.add_job(periodic_task_two, 'interval', seconds=5)
scheduler.add_job(periodic_task_three, 'interval', seconds=10)
scheduler.start()
Example #8
from pytz import utc
from apscheduler.schedulers.blocking import BlockingScheduler
from application import tweetry_manager

scheduler = BlockingScheduler()
scheduler.configure(timezone=utc)

#Scheduled to run everyday at 12:00am UTC
@scheduler.scheduled_job('cron', hour=0)
def cron_job():
    print(f'Scheduler: Tweeting: { tweetry_manager.tweet_final_quote() }')
    tweetry_manager.create_new_tweetry()
    print('Scheduler: A new tweetry has been created.')

scheduler.start()
Example #9
# apsched = BackgroundScheduler()
# apsched.configure({'apscheduler.timezone': 'UTC'})

# # apsched.add_job(schedule_task.run_detect_gap,'cron', day_of_week='mon-fri', hour=20, minute=30)
# # apsched.add_job(schedule_task.run_get_data_candidate, 'cron', day_of_week='mon-fri', hour=21, minute=00)
# # apsched.add_job(schedule_task.run_get_news_gap,'cron', day_of_week='mon-fri', hour=21, minute=10)
# # apsched.add_job(schedule_task.sync_s3,'cron', day_of_week='mon-fri', hour=21, minute=30)

# apsched.add_job(daily_update,'cron', day_of_week='mon-fri', hour=20, minute=15)
# apsched.add_job(test,'cron', day_of_week='mon-fri', hour=3, minute=52)

# #	For testing	#
# # #apsched.add_cron_job(schedule_task.run_detect_gap, day_of_week='mon-sun', hour=23, minute=17)
# # # apsched.add_cron_job(schedule_task.run_get_data_candidate, day_of_week='mon-fri', hour=23, minute=48)
# #apsched.add_cron_job(schedule_task.sync_s3, day_of_week='mon-sun', hour=3, minute=41)
# apsched.start()
# print("started!")

sched = BlockingScheduler()
sched.configure({'apscheduler.timezone': 'UTC'})


@sched.scheduled_job('cron', day_of_week='mon-fri', hour=20, minute=30)
def scheduled_job():
    print("Start updating")
    daily_update()
    print("Done")


sched.start()
Example #10
logging.getLogger('apscheduler').setLevel(logging.INFO)

# Create the scheduler
#   - runs in the foreground
scheduler = BlockingScheduler()

# Configure the scheduler
scheduler.configure(
    # Job store settings
    jobstores={"default": MemoryJobStore()},
    # Executor settings
    executors={
        "default": ThreadPoolExecutor(20),
        "processpool": ProcessPoolExecutor(5)
    },
    # Job defaults
    job_defaults={
        "coalesce": False,  # defaults to True; when several runs of a job are pending, merge them and run once
        "max_instances": 3
    },
    # Timezone setting
    timezone=get_localzone(),  # "Asia/Seoul"
    daemon=True)


# Job task
#   - prints the text it was given
def execute(text):
    print(text)
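The MemoryJobStore above forgets all jobs on restart. A persistent variant, assuming SQLAlchemy is installed and a local SQLite file is acceptable, only swaps the jobstore:

from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()
scheduler.configure(
    # jobs survive restarts in jobs.sqlite instead of living in memory
    jobstores={"default": SQLAlchemyJobStore(url="sqlite:///jobs.sqlite")},
    job_defaults={"coalesce": False, "max_instances": 3},
)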
Example #11
class JobScheduler(object):
    def __init__(self, every=30, unit='second'):
        self.mongo = mongopool.get()
        self.cursor = self.mongo.get_database('apscheduler').get_collection(
            'jobs')
        self.every = every
        self.unit = unit
        self.scheduler = BlockingScheduler(logger=logger)
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=pytz.timezone('Asia/Saigon'))
        self._set_trigger(every, unit)

    def _set_trigger(self, every, unit):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        if unit == 'second':
            self.trigger = CronTrigger(second='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'minute':
            self.trigger = CronTrigger(minute='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'hour':
            self.trigger = CronTrigger(hour='*/{}'.format(every),
                                       start_date=now)
        elif unit == 'day':
            self.trigger = CronTrigger(day='*/{}'.format(every),
                                       start_date=now)
        else:
            raise ValueError('Unknown time unit')

    def add_jobstore(self, jobstore, alias):
        self.scheduler.add_jobstore(jobstore, alias)

    def add_executor(self, executor, alias):
        self.scheduler.add_executor(executor, alias)

    def add_job(self,
                job_fn,
                id='id1',
                name='job1',
                jobstore='default',
                executor='default',
                args=None,
                kwargs=None):
        now = datetime.now().astimezone(pytz.timezone('Asia/Saigon'))
        history = list(self.cursor.find({'_id': id}))
        if history:
            #TODO: process missing jobs
            self.cursor.delete_one({'_id': id})
        next_run_time = self.trigger.get_next_fire_time(None, now)
        if kwargs:
            kwargs['run_time'] = next_run_time
        else:
            kwargs = {'run_time': next_run_time}

        self.scheduler.add_job(job_fn,
                               trigger=self.trigger,
                               next_run_time=next_run_time,
                               id=id,
                               name=name,
                               jobstore=jobstore,
                               executor=executor,
                               args=args,
                               kwargs=kwargs)

    def remove_job(self, id, jobstore='default'):
        self.scheduler.remove_job(job_id=id, jobstore=jobstore)

    def callback(self, callback_fn, mark=EVENT_ALL):
        self.scheduler.add_listener(callback_fn, mark)

    def start(self):
        mongopool.put(self.mongo)
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown()
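A hypothetical driver for JobScheduler, assuming the module-level mongopool, logger, jobstores, executors and job_defaults it references are defined elsewhere; my_task and the job id are illustrative.

def my_task(run_time=None):  # receives run_time via the injected kwargs
    print('running, scheduled for', run_time)

js = JobScheduler(every=15, unit='minute')  # fire every 15 minutes
js.add_job(my_task, id='refresh-1', name='refresh')
js.start()  # blocking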
Example #12
import telebot
#import config
import dm_function
from apscheduler.schedulers.blocking import BlockingScheduler
from pytz import timezone

bot_token = ''  # ask me for it
openW_api = ''  # ask me for it
#Bochum lon and lat
lat = '51'
lon = '7'

bot = telebot.TeleBot(bot_token)
#TODO: get the id from new starts
chat_id = -355590034 #for the 'Daymmon-group' in Bochum

#TODO: get times from the host device
cest = timezone('Europe/Berlin')
sched = BlockingScheduler()
sched.configure(timezone=cest)
@sched.scheduled_job('cron', day_of_week='mon-sun', hour=8)
def scheduled_job():
    out_message1 = dm_function.daymoon()
    if out_message1 != '--':
        out_message2 = dm_function.weather(openW_api, lon, lat)
        if out_message2 != '--':
            bot.send_message(chat_id, out_message1+out_message2)

sched.start()
Example #13
import os
import datetime
from apscheduler.schedulers.blocking import BlockingScheduler
scheduler = BlockingScheduler()

@scheduler.scheduled_job('interval', minutes=1, start_date=datetime.datetime.now() + datetime.timedelta(0,3))
def run_eval():
    command = 'export CUDA_VISIBLE_DEVICES=0; ' \
              '/usr/bin/python ' \
              'bts_eval.py ' \
              '--encoder densenet161_bts ' \
              '--dataset kitti ' \
              '--data_path ../../dataset/kitti_dataset/ ' \
              '--gt_path ../../dataset/kitti_dataset/data_depth_annotated/ ' \
              '--filenames_file ../train_test_inputs/eigen_test_files_with_gt.txt ' \
              '--input_height 352 ' \
              '--input_width 1216 ' \
              '--garg_crop ' \
              '--max_depth 80 ' \
              '--max_depth_eval 80 ' \
              '--output_directory ./models/eval-eigen/ ' \
              '--model_name bts_eigen_v0_0_1 ' \
              '--checkpoint_path ./models/bts_eigen_v0_0_1/ ' \
              '--do_kb_crop '

    print('Executing: %s' % command)
    os.system(command)
    print('Finished: %s' % datetime.datetime.now())

scheduler.configure()
scheduler.start()
Example #14
jobstores = {
    'default': MemoryJobStore()
}
executors = {
    'default': {'type': 'threadpool', 'max_workers': 5},
}
job_defaults = {
    'coalesce': True,
    'max_instances': 3
}
scheduler = BlockingScheduler()

# .. do something else here, maybe add jobs etc.

scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

def minutes(count):
    return count * 60

def hours(count):
    return count * minutes(60)

def days(count):
    return count * hours(24)


def flatten_series_by_url():
    with app.app_context():
        api_handlers_admin.flatten_series_by_url(None, admin_override=True)

def consolidate_rrl_items():
    with app.app_context():
        api_handlers_admin.consolidate_rrl_items(None, admin_override=True)
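The minutes/hours/days helpers presumably feed interval jobs expressed in seconds; a hedged example wiring one of the wrapped admin calls to an assumed 6-hour cadence:

# hours(6) == 21600 seconds
scheduler.add_job(flatten_series_by_url, 'interval', seconds=hours(6))
scheduler.start()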
Example #15
    sys.exit(1)

image9 = os.getenv('IMAGE9', None)
if image9 is None:
    print('Specify IMAGE9 as environment variable.')
    sys.exit(1)

line_bot_api = LineBotApi(channel_access_token)
parser = WebhookParser(channel_secret)

# simulate day
my_date = date.today()

# Start the scheduler
sched = BlockingScheduler()
sched.configure(timezone=timezone('Asia/Hong_Kong'))


@sched.scheduled_job('cron', hour=5)
def checkAndSend():
    global my_date
    date_string = my_date.strftime('%Y-%m-%d')
    print(date_string)

    image_link = None
    text_msg = date_string + ':無'

    s = date(2017, 7, 25)
    if (my_date - s).days % 14 == 0:
        image_link = image1
        text_msg = date_string + '要打齊天大聖囉'
Example #16
    for station in velib_stations:
        tag = get_tag(station)
        bike_nb = station["nbBike"]
        ebike_nb = station["nbEbike"]
        free_dock_nb = station["nbFreeDock"] + station["nbFreeEDock"]
        dock_nb = station["nbDock"] + station["nbEDock"]
        statsd.gauge("velib.regular_bikes", bike_nb, tags=tag)
        statsd.gauge("velib.electric_bikes", ebike_nb, tags=tag)
        statsd.gauge("velib.free_docks", free_dock_nb, tags=tag)
        statsd.gauge("velib.total_docks", dock_nb, tags=tag)
        statsd.service_check(
            "velib.station_state",
            get_state(station),
            tags=tag,
            message=station["station"]["state"],
        )
        logger.info(
            "Found {} ebike(s), {} bike(s), {} free dock(s) on {} dock(s). The station is {}"
            .format(ebike_nb, bike_nb, free_dock_nb, dock_nb,
                    station["station"]["state"]),
            extra={"station": get_station_name(station)},
        )


if __name__ == "__main__":
    sched = BlockingScheduler()
    sched.configure(timezone=utc)
    sched.add_job(get_velibs, "cron", second="0")
    sched.start()
Example #17
stream_handler = logging.StreamHandler()

logger.addHandler(file_handler)
logger.addHandler(stream_handler)

mysql_url = 'mysql+mysqldb://' + settings.DATABASES['default']['USER'] + ':' + \
                                 settings.DATABASES['default']['PASSWORD'] + '@' + \
                                 settings.DATABASES['default']['HOST'] + '/' + \
                                 settings.DATABASES['default']['NAME']
__configure = {
    'apscheduler.standalone': True,
    'apscheduler.jobstores.sqlalchemy_store.class': SQLAlchemyJobStore,
    'apscheduler.jobstores.sqlalchemy_store.engine': create_engine(mysql_url, pool_pre_ping=True)
}
scheduler = BlockingScheduler(timezone=settings.TIME_ZONE)
scheduler.configure(__configure)

api_url   = 'https://api.github.com/repos/'
PATTERN   = r"!\[(\w*|\s|\w+( \w+)*)\]\(([^,:!]*|\/[^,:!]*\.\w+|\w*.\w*)\)"


def db_auto_reconnect(func):
    """Auto reconnect db when mysql has gone away."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            connection.connection.ping()
        except Exception:
            connection.close()
        return func(*args, **kwargs)
    return wrapper
Example #18
def run_providers():
    scheduler = BlockingScheduler()
    scheduler.configure(
        executors={
            "admin": {"type": "processpool", "max_workers": 1},
            "providers": {"type": "processpool", "max_workers": 2},
        },
        job_defaults={
            "misfire_grace_time": 3 * 60,
            "coalesce": True,  # Reschedule a single job if it failed 3 minutes ago
            "max_instances": 1,  # Only 1 job instance executing concurrently
        },
    )

    # Admin jobs
    scheduler.add_job(
        "admin_stations:delete_stations",
        args=(60, ""),
        trigger="cron",
        hour="3",
        executor="admin",
    )
    scheduler.add_job(
        "admin_clusters:save_clusters",
        args=(50,),
        trigger="cron",
        hour="4",
        executor="admin",
    )

    providers_jobs = [
        ("providers.borntofly:borntofly", 5),
        ("providers.ffvl:ffvl", 5),
        ("providers.fluggruppe_aletsch:fluggruppe_aletsch", 5),
        ("providers.holfuy:holfuy", 5),
        ("providers.iweathar:iweathar", 5),
        ("providers.metar_noaa:metar_noaa", 10),
        ("providers.meteoswiss_opendata:meteoswiss", 5),
        ("providers.pdcs:pdcs", 5),
        ("providers.pioupiou:pioupiou", 5),
        ("providers.romma:romma", 5),
        ("providers.slf:slf", 5),
        ("providers.thunerwetter:thunerwetter", 5),
        ("providers.windline:windline", 5),
        ("providers.windspots:windspots", 5),
        ("providers.yvbeach:yvbeach", 5),
        ("providers.zermatt:zermatt", 5),
    ]

    def schedule_jobs(jobs: List) -> int:
        job_index = 0
        if jobs:
            for minute in range(0, 5):
                for second in range(0, 60, 15):
                    func = jobs[job_index][0]
                    interval = jobs[job_index][1]
                    scheduler.add_job(
                        func,
                        trigger="cron",
                        minute=f"{minute}-59/{interval}",
                        second=second,
                        executor="providers",
                    )
                    job_index += 1
                    if job_index > len(jobs) - 1:
                        return job_index - 1
        return job_index - 1

    last_job_index = schedule_jobs(providers_jobs)
    if last_job_index != len(providers_jobs) - 1:
        raise RuntimeError("Some jobs are not scheduled")

    scheduler.start()
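schedule_jobs spreads the provider jobs over (minute offset, second) slots so their cron schedules start 15 seconds apart instead of all firing together. A stand-alone check of the slot sequence the nested loops walk:

# first slots handed out by the loops above
slots = [(m, s) for m in range(0, 5) for s in range(0, 60, 15)]
print(slots[:5])  # [(0, 0), (0, 15), (0, 30), (0, 45), (1, 0)]
# job 0 with interval 5 gets minute="0-59/5", second=0; job 1 gets second=15, ...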
Example #19
    
else:
        pump_Stop()
        dt = datetime.timedelta(365)

        # light cycle (12-hour schedule): apscheduler lets specific sections
        # of the code run at set time intervals, useful for different cycles

        sched = BlockingScheduler()


        @sched.scheduled_job('cron', day_of_week='mon-sun', hour=12)
        def scheduled_job():
            return


        sched.configure()  # options_from_ini_file
        sched.start()

        # pin selection for the pump

        io.setmode(io.BCM)
        io.setup(4, io.OUT)
        while True:
            io.output(4, 0)
            time.sleep(0.30)
            io.output(4, 1)
            time.sleep(0.30)

        # pin selection for the lights

        io.setmode(io.BCM)
Example #20
def scheduler():
    sched = BlockingScheduler()
    sched.configure(timezone='Asia/Seoul')
    sched.add_job(fetch_cryptocompare, 'interval', minutes=1)  # run every minute
    sched.start()
Example #21
from apscheduler.schedulers.blocking import BlockingScheduler

from controller import Controller
from settings import *

if __name__ == "__main__":
    scheduler = BlockingScheduler()
    scheduler.configure(**SCHEDULER)

    controller = Controller()
    controller.init_scheduler(scheduler)
    controller.run()
Example #22
scheduler.add_job(job1, 'interval', seconds=5, id='job1')
# run job2 every 5 seconds
scheduler.add_job(job2, 'cron', second='*/5', id='job2')


# ################### add jobs via decorators #################

# run job3 every 5 seconds
@scheduler.scheduled_job('interval', seconds=5, id='job3')
def job3():
    print('job3 is running, Now is %s' % current_time())


# run job4 every 5 seconds
@scheduler.scheduled_job('cron', second='*/5', id='job4')
def job4():
    print('job4 is running, Now is %s' % current_time())


executors = {
    'processpool': ProcessPoolExecutor(5),
    'default': ThreadPoolExecutor(20)
}
job_defaults = {
    'coalesce': False,
    'max_instances': 5
}
scheduler.configure(executors=executors, job_defaults=job_defaults)
scheduler.start()
Example #23
from util import flatten_history

jobstores = {'default': MemoryJobStore()}
executors = {
    'default': {
        'type': 'threadpool',
        'max_workers': 5
    },
}
job_defaults = {'coalesce': True, 'max_instances': 3}
scheduler = BlockingScheduler()

# .. do something else here, maybe add jobs etc.

scheduler.configure(jobstores=jobstores,
                    executors=executors,
                    job_defaults=job_defaults,
                    timezone=utc)


def minutes(count):
    return count * 60


def hours(count):
    return count * minutes(60)


def days(count):
    return count * hours(24)

Example #24
from apscheduler.schedulers.blocking import BlockingScheduler

from rq import Queue
from worker import conn
from client_tracker import update_daemon

q = Queue(connection=conn)

sched = BlockingScheduler()
sched.configure(timezone="America/Denver")


@sched.scheduled_job("cron", day_of_week="sun", hour=0, minute="01")
def scheduled_job():
    print("Starting the update process")
    q.enqueue(update_daemon.start)


sched.start()
Example #25
def get_date_string(date_object):
    return rfc3339.rfc3339(date_object)


# Function to be run by Scheduler
def callable_func():
    fullTextQuery()


def shutdown(exit_code):
    rootLogger.info('Shutting Down')
    if player_process:
        try:
            player_process.kill()
        except Exception:
            rootLogger.warning('failed to terminate player_process')
    scheduler.shutdown(wait=False)
    exit(exit_code)


if __name__ == '__main__':
    auth()
    # Run scheduler service
    scheduler = BlockingScheduler()
    scheduler.configure(timezone='UTC')
    scheduler.add_job(callable_func, 'interval', seconds=10, max_instances=1)
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        shutdown(0)
Example #26
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FileName  : apscheduler.py
# Author    : wuqingfeng@

import time
import logging
from apscheduler.schedulers.blocking import BlockingScheduler


def run_test():
    print("I am running now...")
    time.sleep(10)
    print("I am done now!")

if __name__ == '__main__':
    logging.basicConfig()
    logging.getLogger('apscheduler').setLevel(logging.DEBUG)
    scheduler = BlockingScheduler()
    scheduler.configure(logger=logging.getLogger('apscheduler'))
    scheduler.add_job(func=run_test, args=(), trigger='interval', jobstore='default',
                      replace_existing=True, seconds=20)
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()