class MonkeyHorde(object):

    def __init__(self, config_file):
        self.config_file = config_file
        self.monkey_list = [
            dict(class_name=ChaosMonkey),
            dict(class_name=SecurityMonkey),
        ]
        self.twitter = self.get_twitter_connector()
        self.scheduler = BlockingScheduler()
        for m in self.monkey_list:
            m['class_name'](config_file, self.scheduler, self.twitter)

    def unleash(self):
        if self.twitter:
            try:
                self.twitter.PostUpdate("I unleashed the evil monkey horde!!!")
            except Exception as e:
                log.exception(e)
        self.scheduler.start()

    def get_twitter_connector(self):
        try:
            credentials = self.config_file.items("twitter")
        except ConfigParser.NoSectionError:
            return None
        return twitter.Api(**dict(credentials))
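
A minimal usage sketch for the class above (hedged: the config file name below is a stand-in, ChaosMonkey/SecurityMonkey must be importable from the surrounding project, and get_twitter_connector returns None when no [twitter] section exists):

# Hypothetical wiring -- 'monkeys.ini' and its contents are assumptions.
import ConfigParser

config = ConfigParser.ConfigParser()
config.read('monkeys.ini')     # optional [twitter] section with python-twitter credentials

horde = MonkeyHorde(config)    # each monkey registers its jobs on the shared scheduler
horde.unleash()                # tweets if configured, then blocks in scheduler.start()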
Example #2
def main():
    global databaseFile
    databaseFile = '.database'
    loadDatabase()
    twitterAuth()

    # Scheduler for the various recurring tasks
    schd = BlockingScheduler()
    '''
    Use the scheduler to check for new tweets every 5 minutes. To watch more
    Twitter profiles, add another job following the pattern below:

    schd.add_job(checkTwitter, 'interval', minutes=MIN, args=['profile'])
    '''
    schd.add_job(checkTwitter, 'interval', minutes=5, args=['rmtcgoiania'])
    schd.add_job(checkTwitter, 'interval', minutes=5, args=['jornalopcao'])
    ## Fetch weather information every day at 06:00 and 16:00
    schd.add_job(checkWeather, 'cron', hour='6,16', minute=0)
    ## Fetch quotation information at 08:00 and 14:00 on weekdays
    schd.add_job(checkQuotation,
                 'cron',
                 day_of_week='mon-fri',
                 hour='8,14',
                 minute=0)
    schd.start()

    # Note: BlockingScheduler.start() blocks, so this keep-alive loop is
    # only reached after the scheduler shuts down.
    while True:
        time.sleep(300)
Example #3
def cli(ctx, helium_key, darksky_key, lat, lon, sensor, every):
    """Monitor weather for a lat/lon locaation.

    This sample service shows how you can use an external weather
    service to emit to a virtual sensor in the Helium platform.

    \b
    he-weather  --every <seconds> <sensor> <lat> <lon>

    The given virtual <sensor> is the id of a created Helium virtual
    sensor.

    The optional <seconds> parameter sets how often weather
    information is fetched and posted to Helium. If the
    parameter is not provided, a default of 60 seconds is used.

    This will run the service based on the given lat/lon.

    """
    client = Client(api_token=helium_key)
    sensor = Sensor.find(client, sensor)

    logging.basicConfig()
    scheduler = BlockingScheduler()
    scheduler.add_job(_process_weather, "interval",
                      seconds=every,
                      next_run_time=datetime.now(),
                      args=[darksky_key, lat, lon, sensor])
    click.echo("Checking every {} seconds".format(every))
    scheduler.start()
Example #4
def getJob(fileName='AutoSentChatroom.xlsx', sheetName='Chatfriends'):
    scheduler = BlockingScheduler()
    workbook = xlrd.open_workbook(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName))
    sheet = workbook.sheet_by_name(sheetName)
    iRows = sheet.nrows
    index = 1
    for i in range(1, iRows):
        textList = sheet.row_values(i)
        name = textList[0]
        context = textList[2]
        float_dateTime = textList[1]
        date_value = xlrd.xldate_as_tuple(float_dateTime, workbook.datemode)
        date_value = datetime(*date_value[:5])
        if datetime.now() > date_value:
            continue
        date_value = date_value.strftime('%Y-%m-%d %H:%M:%S')
        textList[1] = date_value
        scheduler.add_job(SentChatMsg, 'date', run_date=date_value,
                          kwargs={"name": name, "context": context, 'scheduler':scheduler})
        print("任务" + str(index) + ":\n"
                                  "待发送时间:" + date_value + "\n"
                                                          "待发送到:" + name + "\n"
                                                                           "待发送内容:" + context + "\n"
                                                                                                "******************************************************************************\n")
        index = index + 1
        if index == 1:
            print("***没有任务需要执行***")      
    return scheduler
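
getJob only builds the schedule; the caller still has to start it. A short usage sketch (assuming the spreadsheet and SentChatMsg are available):

# Hypothetical caller -- BlockingScheduler.start() blocks until shutdown.
scheduler = getJob('AutoSentChatroom.xlsx', 'Chatfriends')
scheduler.start()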
Example #5
    def __init__(self, top_data_dir, index_file, dir_files_to_parse,
                 files_to_parse, job_func, destination):
        """
            :return:
        """
        self._parser = eumetsat.dmon.parsers.xferlog_parser.XferlogParser(
            no_gems_header=True)
        self._dir_files = dir_files_to_parse
        self._files = files_to_parse
        self._job_func = job_func
        self._scheduler = BlockingScheduler()

        res = []
        t = ftimer(Indexer.load_index, [top_data_dir, index_file], {}, res)
        print("Read index in %d seconds." % (t))
        self._index = res[0]

        # can now set the reference time:
        # reference time = now plus a short defer period (in seconds)
        self._defer_time = 5
        self._reference_date = datetime.datetime.now() + datetime.timedelta(
            seconds=self._defer_time)

        #destination info (depends on the type of job)
        self._destination = destination
Example #6
 def __init__(self):
     self.scheduler = BlockingScheduler(
         jobstores=APSCHEDULER_SETTINGS['jobstores'],
         executors=APSCHEDULER_SETTINGS['executors'],
         job_defaults=APSCHEDULER_SETTINGS['job_defaults'],
         timezone=TIMEZONE_PST8PDT)
Example #7
    def __init__(self,
                 blocking=True,
                 timezone='UTC',
                 config_path='.',
                 logger_level=None,
                 *args,
                 **kwargs):
        """
        Create ReminderDaemon object.

        :param boolean blocking:
            Determines if Scheduler should be BlockingScheduler or BackgroundScheduler.
        :param str timezone: Timezone for the scheduler to use when scheduling jobs.
        :param str config_path: Path to configuration files.
        :param int logger_level: Level to set logger to.
        """
        self.logger = logging.getLogger(__name__)
        if logger_level:
            self.logger.setLevel(logger_level)
        self.logger.debug('initializing daemon')
        self.scheduler = BlockingScheduler(
            timezone=timezone) if blocking else BackgroundScheduler(
                timezone=timezone)
        self.reminders = []
        self.configs = {}
        self.timezone = timezone
        self._observer = Observer()
        self.config_path = config_path
        self._watchdog_handler = PatternMatchingEventHandler('*.yaml;*.yml')
        self._watchdog_handler.on_created = self.on_created
        self._watchdog_handler.on_modified = self.on_created
        self._watchdog_handler.on_deleted = self.on_deleted
        self._observer.schedule(self._watchdog_handler, self.config_path)
Example #8
    def __init__(self, background=False, daemon=True, **kwargs):
        logging.basicConfig(format="[%(asctime)s] %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S")
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

        if background:
            self.sched = BackgroundScheduler(daemon=daemon)  # background
        else:
            self.sched = BlockingScheduler(daemon=daemon)  # foreground

        # TODO: Read from configuration file.
        self.sched.configure(
            jobstores={
                # "sqlite": SQLAlchemyJobStore(url='sqlite:///app/database/example.db'),
                # "default": MemoryJobStore()
                "default":
                SQLAlchemyJobStore(url='sqlite:///app/database/example.db')
            },
            executors={
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            },
            job_defaults={
                'coalesce': False,
                'max_instances': 3
            },
            timezone=get_localzone()  # Asia/Seoul
        )

        self.retried = 0
        self.logger = logging.getLogger('apscheduler')

        super(JobLauncher, self).__init__()
Example #9
 def cmd_start(self):
     from apscheduler.schedulers.blocking import BlockingScheduler
     sched = BlockingScheduler()
     with transaction.manager:
         Scheduler.add_all_to_apscheduler(sched, DbSession, user=SYSTEM_UID,
                                          begin_transaction=True)
     sched.print_jobs()  # print before start(); BlockingScheduler.start() blocks
     sched.start()
Example #10
    def start(self):
        self.scheduler = BlockingScheduler(timezone=utc)

        self.scheduler.add_job(self._purge_images,
                               'cron',
                               hour='5',
                               minute='0')
        self.scheduler.start()
Example #11
 def run(self):
     """Run watcher"""
     self.logger.info("Running watcher ...")
     scheduler = BlockingScheduler()
     scheduler.add_job(self.watching, 'interval', seconds=self.config["interval"])
     try:
         scheduler.start()
     except (KeyboardInterrupt, SystemExit):
         pass
Example #12
    def month_task(self):

        def func():
            self.dh.aum_total()
            self.dh.debt_total()


        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', month='*/1', day='1', hour='5')  # runs at 05:00 on the 1st of every month
        scheduler.start()
Example #13
def main():
    itchat.auto_login()
    # itchat.auto_login(hotReload=True)
    scheduler = BlockingScheduler()
    # job = scheduler.add_job(send_file_by_time, 'date', next_run_time='2019-05-06 16:46:30')
    trigger = DateTrigger(run_date='2019-05-10 15:25:30')
    # job = scheduler.add_job(send_file_by_time, trigger='date', next_run_time='2019-05-10 14:30:30')
    job = scheduler.add_job(send_file_by_time, trigger)
    scheduler.start()  # blocks here; the line below only runs after shutdown
    job.remove()
Example #14
 def __init__(self, reporter: ResultReporter):
     self.reporter = reporter
     self.scheduler = BlockingScheduler()
     self.events = list()
     log_path = static_setting.settings["CaseRunner"].log_path
     log_file = os.path.join(log_path, "event_scheduler_log.log")
     self.log = logger.register("EventScheduler",
                                filename=log_file,
                                for_test=True)
     self.scheduler.add_listener(self._event_listen, EVENT_JOB_EXECUTED)
Example #15
 def __init__(self, main_config):
     self.main_config = main_config
     self.create_dirs()
     self.logger = get_logger(main_config['project_name'],
                              file=main_config['logs_dir'],
                              level=main_config['log_level'])
     self.board = None
     self.scheduler = BlockingScheduler()
     self.setup()
     atexit.register(self._exit)
Example #16
    def run(self):

        job_defaults = {'coalesce': True, 'max_instances': 1}
        self.blocker = BlockingScheduler(job_defaults=job_defaults)
        self.blocker.add_job(self.parse_feed,
                             'cron',
                             second=f'*/{self.sleep}',
                             id='parse_feed')

        self.blocker.start()
Example #17
 def update_forever(self):
     if self.scheduler and self.scheduler.running:
         return
     self.scheduler = BlockingScheduler()
     self.scheduler.add_job(WeatherSign.update,
                            trigger='cron',
                            minute="0,30",
                            max_instances=1,
                            coalesce=True,
                            args=[self])
     self.scheduler.start()
Example #18
def job_scheduler(config):  # pragma: no cover
    """
    Setup logging, start the job scheduler and serve prometheus metrics
    """

    LOGGER.info("Sarting application at http://localhost:8000")

    executors = {
        "default": ThreadPoolExecutor(5),
    }
    job_defaults = {"coalesce": False, "max_instances": 5}

    scheduler = BlockingScheduler(
        executors=executors, job_defaults=job_defaults, timezone=utc
    )

    scheduler.add_job(
        func=download_speed,
        trigger="interval",
        max_instances=1,
        seconds=config["jobs"]["download"]["interval"],
        args=[config["downloadURL"]],
        id="download_speed",
        next_run_time=datetime.datetime.utcnow(),
        start_date=datetime.datetime.utcnow(),
    )

    scheduler.add_job(
        func=latency,
        trigger="interval",
        seconds=config["jobs"]["ping"]["interval"],
        args=[config["icmpDestHost"]],
        id="ping",
        next_run_time=datetime.datetime.utcnow(),
        start_date=datetime.datetime.utcnow(),
    )

    # create temporary upload file
    with open("test-upload", "wb") as out:
        out.truncate(1024 * 1024 * 50)

    scheduler.add_job(
        func=upload_speed,
        trigger="interval",
        seconds=config["jobs"]["upload"]["interval"],
        args=[config["uploadURL"]],
        id="upload_speed",
        next_run_time=datetime.datetime.utcnow(),
        start_date=datetime.datetime.utcnow(),
    )

    # start prometheus server to serve /metrics and /describe endpoints
    start_http_server(addr="0.0.0.0", port=8000)  # nosec
    scheduler.start()
Example #19
 def __init__(self):
     self.scheduler = BlockingScheduler(
         executors={
             'default': ThreadPoolExecutor(15),
             'processpool': ProcessPoolExecutor(1),
         },
         job_defaults={
             'coalesce': False,
             'max_instances': 1,
         })
Example #20
 def __init__(self, local_timezone=None):
     self._timezone = local_timezone
     self._scheduler = BlockingScheduler(timezone=self._timezone)
     self._jobs = {}
     self._trigger_types = TIMER_TRIGGER_TYPES.keys()
     self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                            update_handler=self._handle_update_trigger,
                                            delete_handler=self._handle_delete_trigger,
                                            trigger_types=self._trigger_types,
                                            queue_suffix=self.__class__.__name__,
                                            exclusive=True)
     self._trigger_dispatcher = TriggerDispatcher(LOG)
Example #22
class dataStorage():
    def __init__(self, dataqueue, statusQueue, title=None, dis=None):
        self.x = np.array([])
        self.y = np.array([])
        self.firstTime = None
        self.title= title
        self.dis = dis
        self.scheduler = BlockingScheduler()
        self.dataqueue = dataqueue
        self.statusQueue = statusQueue
        self.dataUpdate_task = self.scheduler.add_job(self.dataUpdate, 'interval', seconds=.1)
        self.eventsUpdate_task = self.scheduler.add_job(self.eventsUpdate, 'interval', seconds=1)
        self.scheduler.start()

    def dataUpdate(self):
        while True:
            try:
                item = self.dataqueue.get(False)
                if item:
                    if not self.firstTime:
                        self.firstTime = item[0]
                    self.x = np.append(self.x, item[0]-self.firstTime)
                    self.y = np.append(self.y, item[1])
                else:
                    break
            except Exception:
                break

    def eventsUpdate(self):
        try:
            item = self.statusQueue.get(False)
            if item:
                if item == 'terminate':
                    print('Event is set')
                    d2s = {}
                    d2s['title'] = self.title
                    d2s['dis'] = self.dis
                    d2s['x'] = self.x
                    d2s['y'] = self.y
                    print(d2s)
                    try:
                        pickle.dump(d2s, open('time_{0}_value_{1}.pkl'.format(int(time.time()), self.title), 'wb'))
                    except Exception as e:
                        print('Failed to dump')
                        print(e)

                    self.x = np.array([])
                    self.y = np.array([])
                    self.firstTime = None
        except Exception:
            pass
Example #23
 def task(self):
     """
     !!!!this function is useless don't run it!!!!
     Parameters:
         year (int|str) – 4-digit year
         month (int|str) – month (1-12)
         day (int|str) – day of the month (1-31)
         week (int|str) – ISO week (1-53)
         day_of_week (int|str) – number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
         hour (int|str) – hour (0-23)
         minute (int|str) – minute (0-59)
         second (int|str) – second (0-59)
         start_date (datetime|str) – earliest possible date/time to trigger on (inclusive)
         end_date (datetime|str) – latest possible date/time to trigger on (inclusive)
         timezone (datetime.tzinfo|str) – time zone to use for the date/time calculations (defaults to scheduler timezone)
     :return:
     """
     scheduler = BlockingScheduler()
     #scheduler.add_job(self.task_func, trigger='cron', day='*/1', hour='1')
     scheduler.add_job(self.task_func, trigger='cron', minute='*/5')
     #scheduler.add_job(func, 'date', run_date='2016-10-25 13:51:30')
     try:
         scheduler.start()
     except Exception as e:
         # TODO: handle scheduler errors properly
         Global.logger.error('Scheduled task error: %s' % e)
         scheduler.shutdown()
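
The docstring above enumerates the cron trigger's fields. A minimal sketch combining several of them (the report function is a stand-in):

# Hypothetical job -- fires at 08:30 on weekdays within the given date window.
from apscheduler.schedulers.blocking import BlockingScheduler

def report():
    print('generating report')

scheduler = BlockingScheduler()
scheduler.add_job(report, trigger='cron',
                  day_of_week='mon-fri', hour=8, minute=30,
                  start_date='2016-01-01', end_date='2016-12-31')
scheduler.start()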
Example #24
    def half_year_task(self):
        def func():
            month = datetime.datetime.now().month - 1
            year = datetime.datetime.now().year
            if month == 0:
                month = 12
                year = year - 1

            half_year = month // 6

            self.dh.customer_value(year, half_year)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', month='7,12', day='2', hour='5')  # compute customer value at 05:00 on 2 July and 2 December
        scheduler.start()
Example #25
def main(config):
    """
    Setup logging, start the job scheduler and serve prometheus metrics
    """

    LOGGER.info('Starting application at http://localhost:8000')

    executors = {
        'default': ThreadPoolExecutor(20),
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 3
    }

    scheduler = BlockingScheduler(
        executors=executors, job_defaults=job_defaults, timezone=utc)


    scheduler.add_job(download_speed, 'interval', seconds=600,
                      args=[config['downloadURL']], id='download_speed')
    scheduler.add_job(latency, 'interval', seconds=60,
                      args=[config['icmpDestHost']], id='ping')

    # start prometheus server to serve /metrics and /describe endpoints
    start_http_server(8000)
    scheduler.start()
Example #26
class MonkeyRunner(object):

    def __init__(self, config_file):
        self.config_file = config_file
        self.monkey_list = [
            dict(class_name=ChaosMonkey),
            dict(class_name=JanitorMonkey),
            dict(class_name=SecurityMonkey),
        ]
        self.scheduler = BlockingScheduler()
        for m in self.monkey_list:
            m['class_name'](config_file, self.scheduler)

    def start(self):
        self.scheduler.start()
Example #27
def get_scheduler(blocking=False):
    scheduler = None
    if blocking:
        scheduler = BlockingScheduler()
    else:
        scheduler = BackgroundScheduler()
    return scheduler
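
A quick usage sketch of get_scheduler (the job is a stand-in):

# Hypothetical usage -- blocking for one-off scripts, background for services.
sched = get_scheduler(blocking=False)
sched.add_job(lambda: print('tick'), 'interval', seconds=10)
sched.start()  # returns immediately for a BackgroundScheduler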
Example #28
    def __init__(self, main_config):
        self.main_config = main_config
        self.main_config['system_state'] = {}

        self.create_dirs()
        self.logger = get_logger(main_config['project_name'],
                                 file=main_config['logs_dir'],
                                 level=main_config['log_level'])
        self.board = None
        self.scheduler = BlockingScheduler(
            logger=self.logger,
            job_defaults={'misfire_grace_time': 45},
        )
        self.setup()
        
        atexit.register(self._exit)
Example #29
 def init_scheduler(self, **kwargs: Any) -> None:
     if self.blocking:
         self.scheduler = BlockingScheduler(jobstores=self.jobstores,
                                            **kwargs)
     else:
         self.scheduler = BackgroundScheduler(jobstores=self.jobstores,
                                              **kwargs)
Example #30
def get_rescheduler():
    timer = BlockingScheduler()

    time_spec = {
        'seconds': cfg.CONF.scheduler.rescheduling_interval,
        'timezone': aps_utils.astimezone('UTC')
    }

    timer.add_job(recover_delayed_executions,
                  trigger=IntervalTrigger(**time_spec),
                  max_instances=1,
                  misfire_grace_time=60,
                  next_run_time=date_utils.get_datetime_utc_now(),
                  replace_existing=True)

    return timer
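
Because next_run_time is set to the current UTC time, the recovery job fires once immediately after the timer starts. A sketch of the caller (hedged; start() blocks for a BlockingScheduler):

# Hypothetical startup -- get_rescheduler() builds the timer, the caller runs it.
timer = get_rescheduler()
timer.start()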
Example #31
    def seasonly_task(self):
        def func():
            # each run processes the previous month's data
            month = datetime.datetime.now().month - 1
            year = datetime.datetime.now().year
            if month == 0:
                month = 12
                year = year - 1

            season = month // 3

            # compute the customer life cycle
            self.dh.run_life_cycle(year, season)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', month='1,4,7,10', day='2', hour='2')
        scheduler.start()
Example #33
 def __init__(self, db, task_instance, task_param):
     self.task_instance = task_instance
     self.task_param = task_param
     self.db = db
     # invoke log
     self.invoke_log_map = {}
     self.jobs = {}
     logging.config.fileConfig("../logger.ini")
     self.logger = logging.getLogger("taskExecutor")
     invoke_count = int(self.task_param.get_invoke_args()['invoke_count'])
     executors = {
         'default': {
             'type': 'threadpool',
             'max_workers': invoke_count + 1
         }
     }
     self.scheduler = BlockingScheduler(executors=executors)
Example #34
 def _run(self):
     """ run your fun"""
     scheduler = BlockingScheduler()
     scheduler.add_job(PrintText().start, 'interval', seconds=3)
     # scheduler.add_job(PrintText().start, 'cron',  hour=start_hour, minute=start_minute,second='0')
     try:
         scheduler.start()
     except (KeyboardInterrupt, SystemExit):
         scheduler.shutdown()
         logger.error('Exit The Job!')
Example #35
def main():
    log = logging.getLogger("main")
    cameras, light, project = parse_configs()

    storage_dir = Path(project["storage_dir"])
    if not storage_dir.is_dir():
        storage_dir.mkdir(parents=True)

    # We will use queue to connect scheduler thread with I2C communication thread
    connection_queue = Queue()

    # Create separate thread for I2C communication
    log.info("Starting I2C thread")
    i2c_driver = I2CDriver(0x04)
    i2c_thread = Thread(target=i2c_thread_function,
                        args=(i2c_driver, connection_queue))
    i2c_thread.start()

    log.info("Running pipeline for the first time")
    pipeline_executor = PipelineExecutor(storage_dir, cameras, light,
                                         connection_queue, project["pipeline"])

    # For the first time, execute pipeline manually, then schedule it
    pipeline_executor.execute()

    # Create a scheduler and add job to it
    log.info("Scheduling the pipeline")
    scheduler = BlockingScheduler()
    scheduler.add_job(
        func=(lambda executor=pipeline_executor: executor.execute()),
        trigger="interval",
        seconds=project['run_interval_seconds'])
    atexit.register(lambda: scheduler.shutdown())
    scheduler.start()  # Blocks thread
Example #36
 def __init__(self):
     conf = configparser.ConfigParser()
     conf.read("../agent.ini")
     ip = conf.get("redis", "ip")
     port = conf.getint("redis", "port")
     timeout = conf.getint("redis", "timeout")
     self.invoker_id = self._get_invoker_id()
     self.max_tasks = conf.getint("invoker", "max_tasks")
     self.live_seconds = conf.getint("invoker", "live_seconds")
     self.db = SchedulerDb(ip, port, timeout)
     logging.config.fileConfig("../logger.ini")
     self.logger = logging.getLogger("main")
     executors = {
         'default': {'type': 'processpool', 'max_workers': self.max_tasks + 1}
     }
      self.blockScheduler = BlockingScheduler(executors=executors)  # use the executor sized above
     self.jobs = {}
     self.lock = threading.Lock()
Example #37
    def __init__(self,
                 queue=None,
                 notify_on_exception=True,
                 name=None,
                 **kwargs):
        '''
        Create a new instance of this Check
        The kwargs are handed over to apscheduler.schedulers.blocking.BlockingScheduler.add_job
        and decide when the checks are run. For example `trigger='cron', hour=8` will
        run this check every day at 8 o'clock
        '''
        super().__init__(queue=queue,
                         notify_on_exception=notify_on_exception,
                         name=name)

        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5 * 60})
        self.scheduler.add_job(self.wrapped_check, **kwargs)
Example #38
    def initialize_scheduler(self, db_jobs, topic):
        """
        Initializes the scheduler
        :param db_jobs: The jobs to schedule
        :param topic: The command topic - StartAuction
        :return: Nothing
        """
        scheduler = BlockingScheduler()
        print('Scheduler initialized...')
        # Schedule the auctions and start the scheduler running
        self.schedule_auctions(scheduler, db_jobs, topic)
        print('Scheduler Running...')

        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            print('Scheduler Stopped...')
            pass
Example #40
 def log(self, container_id, is_watch=False, default_loop_time=20):
     if is_watch:
         scheduler = BlockingScheduler()
          scheduler.add_job(
              self.output_log_single,
              'interval',
              seconds=default_loop_time,
              kwargs={'container_id': container_id})
         try:
             scheduler.start()
         except (KeyboardInterrupt, SystemExit):
             scheduler.shutdown()
     else:
         self.output_log_single(container_id=container_id)
Example #41
 def __init__(self, local_timezone=None):
     self._timezone = local_timezone
     self._scheduler = BlockingScheduler(timezone=self._timezone)
     self._jobs = {}
     self._trigger_types = TIMER_TRIGGER_TYPES.keys()
     self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                            update_handler=self._handle_update_trigger,
                                            delete_handler=self._handle_delete_trigger,
                                            trigger_types=self._trigger_types,
                                            queue_suffix='timers')
     self._trigger_dispatcher = TriggerDispatcher(LOG)
Example #42
 def __init__(self, local_timezone=None):
     self._timezone = local_timezone
     self._scheduler = BlockingScheduler(timezone=self._timezone)
     self._jobs = {}
     self._trigger_types = list(TIMER_TRIGGER_TYPES.keys())
     self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                            update_handler=self._handle_update_trigger,
                                            delete_handler=self._handle_delete_trigger,
                                            trigger_types=self._trigger_types,
                                            queue_suffix=self.__class__.__name__,
                                            exclusive=True)
     self._trigger_dispatcher = TriggerDispatcher(LOG)
Example #43
class ScheduledCheck(Check, metaclass=ABCMeta):
    '''
    An abstract base class for a check that runs based on
    the Scheduler from apscheduler

    Child classes need to implement the check method
    '''
    def __init__(self, queue=None, notify_on_exception=True, name=None, **kwargs):
        '''
        Create a new instance of this Check
        The kwargs are handed over to apscheduler.schedulers.blocking.BlockingScheduler.add_job
        and decide when the checks are run. For example `trigger='cron', hour=8` will
        run this check every day at 8 o'clock
        '''
        super().__init__(queue=queue, notify_on_exception=notify_on_exception, name=name)

        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5*60}
        )
        self.scheduler.add_job(self.wrapped_check, **kwargs)

    def run(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
        self.log.info('Check %s stopped', self.__class__.__name__)
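
A sketch of a concrete check built on ScheduledCheck (the subclass name and body are invented for illustration; the trigger kwargs are forwarded to add_job as the docstring describes):

# Hypothetical subclass -- runs every day at 8 o'clock.
class DiskSpaceCheck(ScheduledCheck):
    def check(self):
        # a real check would inspect the system and push results to self.queue
        print('checking disk space')

check = DiskSpaceCheck(trigger='cron', hour=8)
check.run()  # blocks in BlockingScheduler.start()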
Example #45
 def __init__(self):
     KEY1 = 'UwVrGX4x2r+Pk7bf1aItja=='
     self.token = '4ac1c0259b27f13dfb78c2959da3bf4e'
     self.pc = prpcrypt(b(KEY1))  # initialize the cipher key
     self.info_log = get_logger('logs/info.log')
     self.db = self.connect_db()
     # count how many diseases are still left to crawl
     self.max_len = self.db.disease.count_documents({ 'finished': 0 })
     self.count = 0
     print('Number of remaining diseases: {}'.format(self.max_len))
     self.info_log.warning('Number of remaining diseases: {}'.format(self.max_len))
     if self.max_len > 0:
         print('Task started.')
         print('-' * 50)
         self.info_log.warning('Task started.....')
         # crawl on a fixed interval
         self.scheduler = BlockingScheduler()
         self.scheduler.add_job(self.request_data, 'interval', id='main_schedule', seconds=120, args=[self])
         self.scheduler.start()
Example #47
    def daily_task(self):
        def func():
            day = datetime.datetime.now().strftime('%Y-%m-%d')
            # current deposits
            self.da.init_balance(day, 1)
            self.logger.info('%s: daily balance for current deposits computed', day)
            # time deposits
            self.da.init_balance(day, 2)
            self.logger.info('%s: daily balance for time deposits computed', day)
            # wealth-management products
            self.da.init_balance(day, 3)
            self.logger.info('%s: daily balance for wealth-management products computed', day)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', day='*', hour='1')  # runs daily at 01:00

        try:
            scheduler.start()
        except Exception as e:
            # TODO: handle scheduler errors properly
            self.logger.error('Daily AUM calculation failed: %s', e)
            scheduler.shutdown()
Example #48
def get_theme_colors(theme_name):
    with open(theme_name, "r") as f:
        t = json.load(f)
        return t['colors']

if __name__ == "__main__":
    settings = parse_settings()
    theme = Theme(settings['theme'])
    colors = theme.colors

    # create the bar
    bar = barhandler(theme)

    #Configure scheduler
    scheduler = BlockingScheduler()
    scheduler.configure(timezone='Europe/Stockholm')

    #Schedule jobs
    scheduler.add_job(get_time, 'interval', seconds=30, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_battery, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_cpu, 'interval', seconds=5, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_mpd, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_volume, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_wifi, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])

    #Start continuous jobs
    bspccontrol = BspcControl(bar)
    Thread(target=bspccontrol.inputhandler, args=(colors,)).start()

    #Start scheduler
    scheduler.start()
Example #49
    if len(my_accounts) == 0:
        brain_key = rpc.suggest_brain_key()
        account_registered, account_registration_response = register_account_faucet(config.account, brain_key['pub_key'])
        if account_registered:
            rpc.import_key(config.account, brain_key['wif_priv_key'])

            print("Account: %s succesfully registered" % config.account)
            print(rpc.list_my_accounts())

            print("Brain key: %s" % brain_key['brain_priv_key'])
            print("Write it down/back it up ^")

            print("Send funds to %s and start the bot again" % config.account)
        else:
            print("Account creation failed")
            print(brain_key)
            print(config.faucet + " response: ", account_registration_response)

    else:
        print(my_accounts)
        print(config.account)
        print(rpc.list_account_balances(config.account))
        print("Bot config: " + str(config.bots["MakerRexp"]))
        
        bot.init(config)
 
        run_bot() # running the bot before the scheduler, otherwise it will run for the first time after config.interval
        scheduler = BlockingScheduler()
        scheduler.add_job(run_bot, 'interval', hours=config.interval)
        scheduler.start()
Example #50
def collectStats():
    try:
      sock = socket.socket()
      sock.connect((carbonServer, carbonPort))
    except socket.error as err:
      print("Could not connect to %s:%s, error code %s, %s" % (carbonServer, carbonPort, err.errno, err.strerror))
      return 127
    binary = "/opt/nagios/bin/nagiostats"
    stat = ','.join(str(i) for i in stats)
    command = binary + " --mrtg --data=" + stat
    nagprocess = Popen(command, shell=True, stderr=PIPE, stdout=PIPE, universal_newlines=True)
    stdout, stderr = nagprocess.communicate()
    stdout = stdout.splitlines()
    for stat, metaData in stats.items():
        metricName, descr = metaData
        metricValue = stdout[0]
        del stdout[0]
        string = 'datacenter.stats.nagios.%s.%s %s %i\n' % (hostname, metricName, metricValue, calltime)
        sock.send(string)
        print "%s" % string
    sock.close()


if __name__ == "__main__":

  sched = BlockingScheduler()
  sched.add_job(collectStats, 'interval',  seconds=10)
  ret = collectStats()
  try:
    sched.start()
  except (KeyboardInterrupt, SystemExit):
    pass
Example #51
#!/bin/python

from apscheduler.schedulers.blocking import BlockingScheduler
import notify2
import time
import subprocess
import logging

def stretch():
    notify2.init('Stretch')
    n = notify2.Notification('Get Up !', 'Time to stretch a bit ')
    n.show()
    subprocess.call(['espeak', '-g', '5', 'Get Up. Time to Stretch' ])
    time.sleep(600)
    n = notify2.Notification('Enough Rest', 'Get back to work ')
    n.show();
    subprocess.call(['espeak', '-g', '5', 'Get back to work' ])



logging.basicConfig()
scheduler = BlockingScheduler()
scheduler.add_job(stretch, 'interval', hours=1)
scheduler.start()
Example #52
 def __init__(self, sensor_service=None):
     self._timezone = 'America/Los_Angeles'  # Whatever TZ local box runs in.
     self._sensor_service = sensor_service
     self._log = self._sensor_service.get_logger(self.__class__.__name__)
     self._scheduler = BlockingScheduler(timezone=self._timezone)
     self._jobs = {}
Example #53
class St2TimerSensor(Sensor):
    '''
    A timer sensor that uses APScheduler 3.0.
    '''
    def __init__(self, sensor_service=None):
        self._timezone = 'America/Los_Angeles'  # Whatever TZ local box runs in.
        self._sensor_service = sensor_service
        self._log = self._sensor_service.get_logger(self.__class__.__name__)
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}

    def setup(self):
        pass

    def run(self):
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        id = trigger['id']

        try:
            job_id = self._jobs[id]
        except KeyError:
            self._log.info('Job not found: %s', id)
            return

        self._scheduler.remove_job(job_id)

    def _get_trigger_type(self, ref):
        pass

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type, 'run_date') and datetime.now(tzutc()) > time_type.run_date:
            self._log.warning('Not scheduling expired timer: %s : %s',
                              trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            self._log.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        self._log.info('Timer fired at: %s. Trigger: %s', str(datetime.utcnow()), trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._sensor_service.dispatch(trigger, payload)
Example #54
from datetime import datetime

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()

def job_function():
    print("Hello World")


scheduler.add_job(job_function, 'interval', seconds=0.01)

scheduler.start()
Example #55
class DisseminationPlayer(object):

    MIDNIGHT = datetime.time(0,0,0)

    def __init__(self, top_data_dir, index_file, dir_files_to_parse, files_to_parse, job_func, destination):
        """
            :return:
        """
        self._parser = eumetsat.dmon.parsers.xferlog_parser.XferlogParser(no_gems_header = True)
        self._dir_files = dir_files_to_parse
        self._files = files_to_parse
        self._job_func = job_func
        self._scheduler = BlockingScheduler()

        res = []
        t = ftimer(Indexer.load_index, [top_data_dir, index_file], {}, res)
        print("Read index in %d seconds." % (t))
        self._index = res[0]

        # can now set the reference time:
        # reference time = now plus a short defer period (in seconds)
        self._defer_time = 5
        self._reference_date = datetime.datetime.now() +  datetime.timedelta(seconds=self._defer_time)

        #destination info (depends on the type of job)
        self._destination = destination


    def add_jobs(self):
        """
          Create the jobs from the reference time
        :return:
        """
        for a_file in self._files:
            f_path = "%s/%s" % (self._dir_files, a_file)
            print("Parsing xferlog file %s" % f_path )
            fd = open(f_path)
            self._parser.set_lines_to_parse(fd)
            for elem in self._parser:
                #print("time = %s, filename = %s\n" % (elem['time'], elem['file']))
                #find file in index
                filepath = self._index.get(elem['file'], None)
                if filepath:
                    #get time difference
                    midnight_date = utc.localize(datetime.datetime.combine(elem['time'].date(), self.MIDNIGHT))
                    #print("midnight date = %s ///// elem[time] = %s" % (midnight_date, elem['time']))
                    time_diff = elem['time'] - midnight_date
                    scheduled_date = self._reference_date + time_diff
                    #create job and schedule it with the time difference added to the starting reference time
                    d_trigger = DateTrigger(scheduled_date)

                    self._scheduler.add_job(self._job_func, d_trigger, args=[filepath, self._destination])
                else:
                    print("Could not find %s\n in Index" % (elem['file']))

        print("Player. %d jobs scheduled.\n" % (len(self._scheduler.get_jobs())))


    def start(self):
        """
        :return:
        """
        self._scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

        print("Start Scheduler. Jobs will start to be played in %d sec." % self._defer_time)
        self._scheduler.start()
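
A usage sketch for DisseminationPlayer (all arguments are placeholders; note that start() references module-level jobstores, executors, and job_defaults, which must exist in the real module):

# Hypothetical wiring -- paths, job function, and destination are stand-ins.
def send_file(filepath, destination):
    print('sending %s to %s' % (filepath, destination))

player = DisseminationPlayer('/data', 'index.dat', '/data/xferlogs',
                             ['xferlog.1'], send_file, 'ftp://dest')
player.add_jobs()   # one DateTrigger job per xferlog entry, offset from the reference time
player.start()      # blocks while the jobs replay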
Example #56
File: base.py, Project: hejin/st2
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=self.__class__.__name__,
                                               exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' % (self._get_trigger_type_name(trigger),
                                                          trigger.get('name', uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger, payload, trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(TIMER_TRIGGER_TYPES.values())

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
Example #57
get_coverage = openweather.run

if __name__ == '__main__':
    cba = timezone('America/Argentina/Cordoba')

    log.info("Starting Domus core...")

    jobstores = {
        'mysql': SQLAlchemyJobStore(url=JOB_STORAGE)
    }
    executors = {
        'default': ThreadPoolExecutor(20),
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 5
    }
    log.info("Starting core...")
    log.debug("Connecting to job store...")
    scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=cba)
    log.debug("Creating Jobs...")
    scheduler.add_job(cache_data, 'interval', minutes=20, id='data_from_wunderground')
    scheduler.add_job(get_coverage, 'interval', minutes=5, id='data_from_openwrt')
    scheduler.add_job(do_forecast,  trigger='cron', minute='30', hour='8,13', id='twitting forecast')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print "quitting"
        scheduler.shutdown(wait=False)
        pass
Example #58
try:
    import Adafruit_DHT
except ImportError:
    class Adafruit_DHTMOCK():
        DHT22 = None  # stand-in for the sensor constant used below

        def read_retry(self, sensor, pin):
            return 25, 50
    Adafruit_DHT = Adafruit_DHTMOCK()
import requests
import logging
from apscheduler.schedulers.blocking import BlockingScheduler

THERMOSTAT_URI = 'http://192.168.1.214:5000/api/v1/temperature/'

def main():
    humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, '17')
    if humidity is not None and temperature is not None:
        requests.post(THERMOSTAT_URI, data=dict(temperature=temperature, humidity=humidity))
        logger.warn('Temp={0:0.1f}*C  Humidity={1:0.1f}%'.format(temperature, humidity))
    else:
        logger.error('Failed to get reading. Try again!')

if __name__ == '__main__':
    logging.basicConfig(level=logging.WARN, format='%(levelname)s - %(asctime)s %(message)s')
    logger = logging.getLogger('main')
    scheduler = BlockingScheduler()
    scheduler.add_job(main, 'interval', seconds=60)
    logger.warn('starting scheduler')
    scheduler.start()

Example #59

                    Mirrors-AutoSync

  A tool to set schedules to rsync from remote server.
  developed by eastpiger from Geek Pie @ ShanghaiTech

                - http://www.geekpie.org/
                - http://www.eastpiger.com/
 - https://github.com/ShanghaitechGeekPie/Mirrors-AutoSync

===========================================================
''')

print('[Loading config file]\n')

scheduler = BlockingScheduler(
	timezone = 'Asia/Shanghai',
)

config_file = open(config_file_dir, 'r')

content = json.loads(config_file.read())

base_dir = content['base_dir']
status_file_dir = content['status_file_dir']
log_file_dir = content['log_file_dir']

for i in content['schedules']:
	t = task(i['name'], i['schedule'], i['path'])
	t.setup(scheduler)

config_file.close()
Example #60
File: te.py, Project: gaubert/rodd
job_defaults = {
    'coalesce': False,
    'max_instances': 3
}

def runnable(file_path):
    """

    :return:
    """
    # run a job
    print("JOB now starting. FIle path %s" % (file_path))
    print("JOB .....")
    print("JOB now finished")

scheduler = BlockingScheduler()

# .. do something else here, maybe add jobs etc.

the_date = datetime.datetime.now() +  datetime.timedelta(seconds=2)

d_trigger = DateTrigger(the_date)

l = lambda: runnable('/tmtmtmtmtmtmt')

scheduler.add_job(func=runnable, trigger=d_trigger, args=['tick\n'])

the_date = datetime.datetime.now() +  datetime.timedelta(seconds=2)

d_trigger = DateTrigger(the_date)