Example #1
def set_watchdog_on_wdk_models():
    """
    Update the database with user-defined Workflows.

    :return: None
    """

    try:
        watchdog = Watchdog.load()
    except ProgrammingError:
        # raised when the models do not yet exist in the db (e.g. during `python manage.py migrate`)
        print("Watchdog singleton cannot be fetched from db.")
        return

    if not watchdog.running and WFE_WATCHDOG_INTERVAL > 0:
        # register deregister_watchdog() to run as an exit handler.
        atexit.register(deregister_watchdog)

        # mark watchdog as running
        watchdog.running = True
        watchdog.save()
        # schedule periodic watchdog's execution
        scheduler = BlockingScheduler(daemon=True)
        scheduler.add_job(update_wdk_models,
                          "interval",
                          seconds=WFE_WATCHDOG_INTERVAL)
        scheduler.start()
    elif WFE_WATCHDOG_INTERVAL <= 0:
        print(
            f"Watchdog turned of by WFE_WATCHDOG_INTERVAL equal: {WFE_WATCHDOG_INTERVAL}"
        )
    elif watchdog.running:
        print(f"Watchdog process already running.")
Example #2
class ScheduledCheck(Check, metaclass=ABCMeta):
    '''
    An abstract base class for a check that runs based on
    the Scheduler from apscheduler

    Child classes need to implement the check method
    '''
    def __init__(self, queue=None, notify_on_exception=True, name=None, **kwargs):
        '''
        Create a new instance of this Check.
        The kwargs are handed over to apscheduler.schedulers.blocking.BlockingScheduler.add_job
        and decide when the checks are run. For example `trigger='cron', hour=8` will
        run this check every day at 8 o'clock.
        '''
        super().__init__(queue=queue, notify_on_exception=notify_on_exception, name=name)

        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5*60}
        )
        self.scheduler.add_job(self.wrapped_check, **kwargs)

    def run(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
        self.log.info('Check %s stopped', self.__class__.__name__)
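A minimal usage sketch for this class, assuming a concrete subclass that implements check() and that the base Check class provides wrapped_check():

class DiskSpaceCheck(ScheduledCheck):
    def check(self):
        # hypothetical check body
        print('checking disk space')

check = DiskSpaceCheck(trigger='cron', hour=8)
check.run()  # blocks; check() now runs every day at 08:00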
Example #3
 def task(self):
     """
     !!!! This function is useless, don't run it !!!!
     Parameters:
         year (int|str) – 4-digit year
         month (int|str) – month (1-12)
         day (int|str) – day of the month (1-31)
         week (int|str) – ISO week (1-53)
         day_of_week (int|str) – number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
         hour (int|str) – hour (0-23)
         minute (int|str) – minute (0-59)
         second (int|str) – second (0-59)
         start_date (datetime|str) – earliest possible date/time to trigger on (inclusive)
         end_date (datetime|str) – latest possible date/time to trigger on (inclusive)
         timezone (datetime.tzinfo|str) – time zone to use for the date/time calculations (defaults to scheduler timezone)
     :return:
     """
     scheduler = BlockingScheduler()
     #scheduler.add_job(self.task_func, trigger='cron', day='*/1', hour='1')
     scheduler.add_job(self.task_func, trigger='cron', minute='*/5')
     #scheduler.add_job(func, 'date', run_date='2016-10-25 13:51:30')
     try:
         scheduler.start()
     except Exception as e:
         # TODO: handle execution errors properly
         Global.logger.error('Scheduled task error: %s' % e)
         scheduler.shutdown()
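The inline form trigger='cron', minute='*/5' is shorthand for an explicit CronTrigger built from the parameters listed in the docstring; a self-contained equivalent:

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger

def task_func():
    print('tick')

scheduler = BlockingScheduler()
# same schedule as trigger='cron', minute='*/5' above
scheduler.add_job(task_func, CronTrigger(minute='*/5'))
scheduler.start()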
Example #4
 def get_code(self):
     
     sched = BlockingScheduler(timezone='MST')
     # update from Monday to Friday at 9:30:10 (the commented-out cron job below)
     #sched.add_job(scheduler, 'cron', day_of_week='mon-fri', hour='9', minute='30', second='10')
     sched.add_job(self.get_code_true, 'interval',  seconds=300)
     sched.start()
Example #5
def start_jukebox(music_folder=music_folder, db_path=db):
    """
    :param music_folder: string, path to music
    :param db_path: string, path to store database
    :return: None
    """

    db = 'sqlite:///{}'.format(db_path)

    engine = create_engine(db, echo=False)
    Session = sessionmaker(bind=engine)
    session = Session()

    # initialize the database if it does not exist yet
    if not os.path.isfile(db_path):
        print('No existing database found, starting new session')
        Base.metadata.create_all(engine)

    populate(session, music_folder)

    round_end = setup_new_round(session, first_round=True)
    first_round = datetime.now() + timedelta(minutes=0, seconds=1)
    scheduler = BlockingScheduler()

    # first song starts after the first round of voting ends
    scheduler.add_job(play_next_song,
                      'date',
                      run_date=first_round,
                      args=[Session, scheduler, music_folder])
    print('Starting Jukebox')
    scheduler.start()

    return None
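Example #6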
def getJob(fileName='AutoSentChatroom.xlsx', sheetName='Chatfriends'):
    scheduler = BlockingScheduler()
    workbook = xlrd.open_workbook(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName))
    sheet = workbook.sheet_by_name(sheetName)
    iRows = sheet.nrows
    index = 1
    for i in range(1, iRows):
        textList = sheet.row_values(i)
        name = textList[0]
        context = textList[2]
        float_dateTime = textList[1]
        date_value = xlrd.xldate_as_tuple(float_dateTime, workbook.datemode)
        date_value = datetime(*date_value[:5])
        if datetime.now() > date_value:
            continue
        date_value = date_value.strftime('%Y-%m-%d %H:%M:%S')
        textList[1] = date_value
        scheduler.add_job(SentChatMsg, 'date', run_date=date_value,
                          kwargs={"name": name, "context": context, 'scheduler': scheduler})
        print("Task " + str(index) + ":\n"
              "Send at: " + date_value + "\n"
              "Send to: " + name + "\n"
              "Content: " + context + "\n"
              "******************************************************************************\n")
        index = index + 1
    # in the original this check sat inside the loop, after the increment,
    # so it could never fire; it belongs after the loop
    if index == 1:
        print("*** No tasks to send ***")
    return scheduler
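getJob only builds the scheduler and returns it; a plausible caller (an assumption, not shown in the snippet) would start it and let it block until the 'date' jobs have fired:

if __name__ == '__main__':
    scheduler = getJob()
    try:
        scheduler.start()  # blocks; each 'date' job fires exactly once
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()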
Example #7
def main():
    log = logging.getLogger("main")
    cameras, light, project = parse_configs()

    storage_dir = Path(project["storage_dir"])
    if not storage_dir.is_dir():
        storage_dir.mkdir(parents=True)

    # We will use queue to connect scheduler thread with I2C communication thread
    connection_queue = Queue()

    # Create separate thread for I2C communication
    log.info("Starting I2C thread")
    i2c_driver = I2CDriver(0x04)
    i2c_thread = Thread(target=i2c_thread_function,
                        args=(i2c_driver, connection_queue))
    i2c_thread.start()

    log.info("Running pipeline for the first time")
    pipeline_executor = PipelineExecutor(storage_dir, cameras, light,
                                         connection_queue, project["pipeline"])

    # For the first time, execute pipeline manually, then schedule it
    pipeline_executor.execute()

    # Create a scheduler and add job to it
    log.info("Scheduling the pipeline")
    scheduler = BlockingScheduler()
    scheduler.add_job(
        func=(lambda executor=pipeline_executor: executor.execute()),
        trigger="interval",
        seconds=project['run_interval_seconds'])
    atexit.register(lambda: scheduler.shutdown())
    scheduler.start()  # Blocks thread
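i2c_thread_function is started here but never shown; a minimal sketch of a queue-draining loop, where both the send() method and the None shutdown sentinel are assumptions rather than part of the original code:

def i2c_thread_function(driver, queue):
    # forward commands queued by the pipeline to the I2C bus
    while True:
        command = queue.get()  # blocks until the pipeline enqueues work
        if command is None:    # assumed shutdown sentinel
            break
        driver.send(command)   # assumed I2CDriver method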
Example #8
def main() -> None:
    args = docopt(__doc__,
                  version='MSA (store) version ' + __version__,
                  options_first=True)
    if args['--verbose']:
        MSALogger.activate_global_info_logging()
    if args['--log-file']:
        MSALogger.set_logfile(args['--log-file'])

    log.info('Opening database connection...')
    db = MSADataBase(config_file=Defaults.get_db_config())

    if args['--dump-db']:
        log.info('Database has the following entries:')
        for entry in db.dump_table():
            log.info(entry)
        return

    log.info('Opening kafka messaging...')
    kafka = MSAKafka(config_file=Defaults.get_kafka_config())

    store_to_database(kafka.read(), db)
    if args['--single-shot']:
        return

    db_scheduler = BlockingScheduler()
    db_scheduler.add_job(lambda: store_to_database(kafka.read(), db),
                         'interval',
                         seconds=int(args['--update-interval'] or 30))
    db_scheduler.start()
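Example #9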
def cli(ctx, helium_key, darksky_key, lat, lon, sensor, every):
    """Monitor weather for a lat/lon locaation.

    This sample service shows how you can use an external weather
    service to emit to a virtual sensor in the Helium platform.

    \b
    he-weather  --every <seconds> <sensor> <lat> <lon>

    The given virtual <sensor> is the id of a created Helium virtual
    sensor.

    The optional <seconds> parameter sets how often weather
    information needs to get fetched and posted to Helium. If the
    parameter is not provided a default (60 seconds) is picked.

    This will run the service based on the given lat/lon.

    """
    client = Client(api_token=helium_key)
    sensor = Sensor.find(client, sensor)

    logging.basicConfig()
    scheduler = BlockingScheduler()
    scheduler.add_job(_process_weather,
                      "interval",
                      seconds=every,
                      next_run_time=datetime.now(),
                      args=[darksky_key, lat, lon, sensor])
    click.echo("Checking every {} seconds".format(every))
    scheduler.start()
Example #10
def main(config):
    """
    Setup logging, start the job scheduler and serve prometheus metrics
    """

    LOGGER.info('Starting application at http://localhost:8000')

    executors = {
        'default': ThreadPoolExecutor(20),
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 3
    }

    scheduler = BlockingScheduler(
        executors=executors, job_defaults=job_defaults, timezone=utc)


    scheduler.add_job(download_speed, 'interval', seconds=600,
                      args=[config['downloadURL']], id='download_speed')
    scheduler.add_job(latency, 'interval', seconds=60,
                      args=[config['icmpDestHost']], id='ping')

    # start prometheus server to serve /metrics and /describe endpoints
    start_http_server(8000)
    scheduler.start()
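download_speed and latency are only scheduled here, not defined; a plausible shape for the download job, assuming a prometheus_client Gauge as the exported metric:

import time
import urllib.request
from prometheus_client import Gauge

# hypothetical metric; the snippet's real job body is not shown
DOWNLOAD_SPEED = Gauge('download_speed_bytes_per_second',
                       'Measured download throughput')

def download_speed(url):
    start = time.monotonic()
    payload = urllib.request.urlopen(url, timeout=30).read()
    DOWNLOAD_SPEED.set(len(payload) / (time.monotonic() - start))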
Example #11
 def time_send(self):
     self.settime()
     try:
         f = open('time.json', 'r')
     except OSError:
         QMessageBox.information(self, 'Info', 'Could not open time.json')
     else:
         data = json.load(f)
         set_time = data['time']
         data = time.strptime(set_time,'%Y-%m-%d %H:%M:%S')
         data = datetime(data[0], data[1], data[2], data[3], data[4], data[5])
         if datetime.now()<=data:
             name = self.choose_obj()
             if name:
                 scheduler = BlockingScheduler()
                 scheduler.add_job(SentChatMsg, 'date', run_date=set_time,
                                   kwargs={'username': self.get_username(name), "context": self.radio_checked()})
                 QMessageBox.information(self, 'Info', "Setup complete:\n"
                                         "Send at: " + set_time + "\n"
                                         "Send to: " + name + "\n"
                                         "Content:\n" + self.radio_checked() + "\n"
                                         "******************\n")
                 scheduler.start()
             else:
                 QMessageBox.information(self, 'Info', 'Wrong')
         else:
             QMessageBox.warning(self, 'Info', 'Invalid time setting, please set it again!')
Example #12
def cli(ctx, helium_key, darksky_key, lat, lon, sensor, every):
    """Monitor weather for a lat/lon locaation.

    This sample service shows how you can use an external weather
    service to emit to a virtual sensor in the Helium platform.

    \b
    he-weather  --every <seconds> <sensor> <lat> <lon>

    The given virtual <sensor> is the id of a created Helium virtual
    sensor.

    The optional <seconds> parameter sets how often weather
    information needs to get fetched and posted to Helium. If the
    parameter is not provided a default (60 seconds) is picked.

    This will run the service based on the given lat/lon.

    """
    client = Client(api_token=helium_key)
    sensor = Sensor.find(client, sensor)

    logging.basicConfig()
    scheduler = BlockingScheduler()
    scheduler.add_job(_process_weather, "interval",
                      seconds=every,
                      next_run_time=datetime.now(),
                      args=[darksky_key, lat, lon, sensor])
    click.echo("Checking every {} seconds".format(every))
    scheduler.start()
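Example #13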
def main():
    try:
        config = {
                'REDIS_URL':  os.environ['REDIS_URL'],
                'ROUTER_HOST': os.environ['ROUTER_HOST'],
                'ROUTER_USER': os.environ['ROUTER_USER'],
                'ROUTER_PASS': os.environ['ROUTER_PASS'],
                'INTERVAL': int(os.environ.get('INTERVAL', 60)),
                'LOGLEVEL': getattr(logging,
                                    os.environ.get('LOGLEVEL', 'WARNING')),
        }
    except KeyError as exc:
        logging.error("Failed to get configuration from the environment: {}"
                      .format(exc))
        sys.exit(1)

    logging.basicConfig(level=config['LOGLEVEL'])
    logging.getLogger("apscheduler").setLevel(logging.WARNING)

    scheduler = BlockingScheduler()

    logging.warning("Checking the ARP table on `{}' every {} seconds."
                    .format(config['ROUTER_HOST'], config['INTERVAL']))

    scheduler.add_job(run, 'interval', [config],
                      seconds=config['INTERVAL'], coalesce=True)
    scheduler.start()
Example #14
def run():
    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 10
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    scheduler = BlockingScheduler()
    scheduler.configure(executors=executors)

    client = api.use('ths', debug=False)
    client.connect(r"c:\workspace\同花顺\xiadan.exe", timeout=5)
    client.enable_type_keys_for_editor()
    # add cron jobs for the follower sell/buy tasks around market open
    scheduler.add_job(join_quant_follower_sell,
                      'cron',
                      day_of_week='mon-fri',
                      hour=9,
                      minute=27,
                      args=[client])
    scheduler.add_job(join_quant_follower_buy,
                      'cron',
                      day_of_week='mon-fri',
                      hour=9,
                      minute=31,
                      args=[client])
    # join_quant_follower_sell(client,session)
    # join_quant_follower_buy(client,session)
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.remove_all_jobs()
Example #15
class ScheduledCheck(Check, metaclass=ABCMeta):
    '''
    An abstract base class for a check that runs based on
    the Scheduler from apscheduler

    Child classes need to implement the check method
    '''
    def __init__(self,
                 queue=None,
                 notify_on_exception=True,
                 name=None,
                 **kwargs):
        '''
        Create a new instance of this Check.
        The kwargs are handed over to apscheduler.schedulers.blocking.BlockingScheduler.add_job
        and decide when the checks are run. For example `trigger='cron', hour=8` will
        run this check every day at 8 o'clock.
        '''
        super().__init__(queue=queue,
                         notify_on_exception=notify_on_exception,
                         name=name)

        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5 * 60})
        self.scheduler.add_job(self.wrapped_check, **kwargs)

    def run(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
        self.log.info('Check %s stopped', self.__class__.__name__)
Example #16
def init():
    sched = BlockingScheduler()
    global DB
    DB  = price_db.priceDB()

    # seconds can be replaced with minutes, hours, or days
    sched.add_job(writeToDB, 'cron', minute='*/2', misfire_grace_time=10, coalesce=True)
    sched.start()
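Example #17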
 def run(self):
     """Run watcher"""
     self.logger.info("Running watcher ...")
     scheduler = BlockingScheduler()
     scheduler.add_job(self.watching, 'interval', seconds=self.config["interval"])
     try:
         scheduler.start()
     except (KeyboardInterrupt, SystemExit):
         pass
Example #18
def start_scheduler():
    """Start the job scheduler"""
    app = get_or_create()
    sched = BlockingScheduler(daemon=True)
    sched.add_job(mark_old_processing_data_files_as_failed,
                  'interval',
                  minutes=60,
                  kwargs=dict(app=app))
    sched.start()
Example #19
    def month_task(self):

        def func():
            self.dh.aum_total()
            self.dh.debt_total()


        scheduler = BlockingScheduler()
        # run on the 1st of every month at 05:00
        scheduler.add_job(func, 'cron', month='*/1', day='1', hour='5')
        scheduler.start()
Example #20
def main(cfgfile, savefile='dev_info.yaml') -> None:
    # Setup
    cfg = Config(cfg=cfgfile)
    scheduler = BlockingScheduler()
    scheduler.add_job(dcheck,
                      'interval',
                      args=[cfg, savefile],
                      minutes=cfg.interval)
    scheduler.start()
    return
Example #21
 def _run(self):
     """ run your fun"""
     scheduler = BlockingScheduler()
     scheduler.add_job(PrintText().start, 'interval', seconds=3)
     # scheduler.add_job(PrintText().start, 'cron',  hour=start_hour, minute=start_minute,second='0')
     try:
         scheduler.start()
     except (KeyboardInterrupt, SystemExit):
         scheduler.shutdown()
         logger.error('Exit The Job!')
Example #22
File: main.py Project: krislint/kouzhao
def main():
    scheduler = BlockingScheduler()
    scheduler.add_job(miaosha_kz,
                      'cron',
                      day="*/7",
                      hour='19',
                      minute="0",
                      second='1',
                      timezone=kzconfig.cst_tz)
    scheduler.start()
Example #23
def main():
    scheduler = BlockingScheduler()

    for job in JOB_LIST:
        if callable(job['func']):
            scheduler.add_job(**job)

    try:
        scheduler.start()
    except Exception as e:
        log.exception("scheduler quit with exception, e=%s" % e)
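JOB_LIST is assumed to be a module-level list of add_job keyword dicts; a plausible shape, with stub job functions standing in for the real ones:

def cleanup_temp_files():
    pass

def refresh_cache():
    pass

# each entry is expanded as scheduler.add_job(**job); 'func' must be callable
JOB_LIST = [
    {'func': cleanup_temp_files, 'trigger': 'cron', 'hour': 3},
    {'func': refresh_cache, 'trigger': 'interval', 'minutes': 15},
]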
Example #24
def main():
    config = parse_config()

    db_host = config['postgresql']['host']
    db_port = int(config['postgresql']['port'])
    db_name = config['postgresql']['database']

    api_account = fetch_pass(
        keyword='icinga2 api',
        host=db_host,
        port=db_port,
        db=db_name
    )

    influx_account = fetch_pass(
        keyword='influxdb',
        host=db_host,
        port=db_port,
        db=db_name
    )

    icinga_client = DataSource(
        host=config['icinga']['host'],
        port=int(config['icinga']['port']),
        user=api_account.user,
        password=api_account.passwd,
        service=config['base']['service'],
        attrs=config['icinga'].get('attributes', ''),
        api_v=config['icinga'].get('api_version', 'v1'),
        domain_filter=config['base'].get('domain_filter', list()),
        timeout=int(config['icinga']['timeout'])
    )

    writer_client = InfluxDBWriter(
        host=config['influxdb']['host'],
        port=int(config['influxdb']['port']),
        user=influx_account.user,
        password=influx_account.passwd,
        database=config['influxdb']['database'],
        timeout=int(config['influxdb']['timeout'])
    )

    scheduler = BlockingScheduler()
    interval = int(config['base'].get('sample_interval', '15'))
    scheduler.add_job(
        func=monitor,
        args=(icinga_client, writer_client),
        trigger="interval",
        seconds=interval,
        max_instances=1,
        id="antilles-monitoring"
    )
    scheduler.start()
Example #25
 def log(self, container_id, is_watch=False, default_loop_time=20):
     if is_watch:
         scheduler = BlockingScheduler()
         # add_job needs a callable; wrap the call in a lambda instead of
         # passing its return value
         scheduler.add_job(
             lambda: self.output_log_single(container_id=container_id),
             'interval',
             seconds=default_loop_time)
         try:
             scheduler.start()
         except (KeyboardInterrupt, SystemExit):
             scheduler.shutdown()
     else:
         self.output_log_single(container_id=container_id)
Example #26
class dataStorage():
    def __init__(self, dataqueue, statusQueue, title=None, dis=None):
        self.x = np.array([])
        self.y = np.array([])
        self.firstTime = None
        self.title= title
        self.dis = dis
        self.scheduler = BlockingScheduler()
        self.dataqueue = dataqueue
        self.statusQueue = statusQueue
        self.dataUpdate_task = self.scheduler.add_job(self.dataUpdate, 'interval', seconds=.1)
        self.eventsUpdate_task = self.scheduler.add_job(self.eventsUpdate, 'interval', seconds=1)
        self.scheduler.start()

    def dataUpdate(self):
        while True:
            try:
                item = self.dataqueue.get(False)
                if item:
                    if not self.firstTime:
                        self.firstTime = item[0]
                    self.x = np.append(self.x, item[0]-self.firstTime)
                    self.y = np.append(self.y, item[1])
                else:
                    break
            except Exception:  # queue is empty
                break

    def eventsUpdate(self):
        try:
            item = self.statusQueue.get(False)
            if item:
                if item == 'terminate':
                    print('Event is set')
                    d2s = {}
                    d2s['title'] = self.title
                    d2s['dis'] = self.dis
                    d2s['x'] = self.x
                    d2s['y'] = self.y
                    print(d2s)
                    try:
                        pickle.dump( d2s, open('time_{0}_value_{1}.pkl'.format(int(time.time()), self.title),'wb'))
                    except Exception as e:
                        print('Failed to dump')
                        print(e)

                    self.x = np.array([])
                    self.y = np.array([])
                    self.firstTime = None
        except Exception:
            pass
Example #27
    def half_year_task(self):
        def func():
            month = datetime.datetime.now().month - 1
            year = datetime.datetime.now().year
            if month == 0:
                month = 12
                year = year - 1

            half_year = month // 6

            self.dh.customer_value(year,half_year)

        scheduler = BlockingScheduler()
        # compute customer value on Jul 2 and Dec 2 at 05:00
        scheduler.add_job(func, 'cron', month='7,12', day='2', hour='5')
        scheduler.start()
Example #28
def setup(clear=False):
    global retrievers, scheduler
    if clear:
        retrievers = []

    for item_config in configloader.get()['item-filters']:
        retrievers.append(ListingRetriever(item_config))

    scheduler = BlockingScheduler()
    scheduler.add_job(run_refreshes,
                      'interval',
                      id='scheduler_task',
                      seconds=5,
                      next_run_time=datetime.datetime.now())
    scheduler.start()
Example #29
def schedule(ctx, hour):
    email = ctx.parent.params['email']
    username = ctx.parent.params['username']
    email_to = ctx.parent.params['email_to']
    password = ctx.obj.get('password', None)
    gsheet = ctx.parent.params['gsheet']
    doc_key = ctx.parent.params['doc_key']

    schedule = BlockingScheduler()
    schedule.add_job(run, kwargs={"email": email, "gsheet": gsheet, "doc_key": doc_key,
                                  "username": username, "email_to": email_to, "password": password}, trigger='cron', hour=hour)
    try:
        schedule.start()
    except (KeyboardInterrupt, SystemExit):
        schedule.shutdown()
Example #30
    def seasonly_task(self):
        def func():
            # each run computes the previous month's data
            month = datetime.datetime.now().month - 1
            year = datetime.datetime.now().year
            if month == 0:
                month = 12
                year = year-1

            season = month // 3

            # compute the life cycle
            self.dh.run_life_cycle(year,season)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', month='1,4,7,10', day='2', hour='2')
        scheduler.start()
Example #31
def get_rescheduler():
    timer = BlockingScheduler()

    time_spec = {
        'seconds': cfg.CONF.scheduler.rescheduling_interval,
        'timezone': aps_utils.astimezone('UTC')
    }

    timer.add_job(recover_delayed_executions,
                  trigger=IntervalTrigger(**time_spec),
                  max_instances=1,
                  misfire_grace_time=60,
                  next_run_time=date_utils.get_datetime_utc_now(),
                  replace_existing=True)

    return timer
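get_rescheduler returns the timer without starting it; the caller is expected to start it, at which point the call blocks:

timer = get_rescheduler()
timer.start()  # blocks; recover_delayed_executions now runs on the configured interval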
Example #32
File: scheduler.py Project: lyandut/st2
def get_rescheduler():
    timer = BlockingScheduler()

    time_spec = {
        'seconds': cfg.CONF.scheduler.rescheduling_interval,
        'timezone': aps_utils.astimezone('UTC')
    }

    timer.add_job(recover_delayed_executions,
                  trigger=IntervalTrigger(**time_spec),
                  max_instances=1,
                  misfire_grace_time=60,
                  next_run_time=date_utils.get_datetime_utc_now(),
                  replace_existing=True)

    return timer
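Example #33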
def scheduler(google_service):
    # create a blocking scheduler
    sched = BlockingScheduler(timezone='UTC')
    # create event fetcher instance
    fetcher = event_fetcher(google_service, sched)
    # set job1
    sched.add_job(fetcher.update_database,
                  'interval',
                  id='10_minutes_update_database',
                  seconds=60 * 10)
    # set job2
    sched.add_job(fetcher.clear_passed_events,
                  'interval',
                  id='10_hours_clear_passed_events',
                  seconds=60 * 60 * 10)
    # start the scheduler (this call blocks)
    sched.start()
Example #34
def engage():
    banner.print_banner()
    oneness_scheduler = BlockingScheduler({
        'apscheduler.executors.processpool': {
            'class': 'apscheduler.executors.pool:ProcessPoolExecutor',
            'max_workers': '20'
        },
        'job_defaults': {
            'coalesce': False,
            'executor': 'processpool'
        }
    })

    oneness_scheduler.add_executor('processpool')
    t_retweet = oneness_scheduler.add_job(twitter.retweet.start,
                                          'interval',
                                          minutes=60,
                                          id='twitter_retweet_bot')
    t_follow = oneness_scheduler.add_job(twitter.follow.start,
                                         'interval',
                                         minutes=10,
                                         id='twitter_follow_bot')

    # quoted_im_generator = oneness_scheduler.add_job(
    #     image_generator.quoted_image.start,
    #     'interval', minutes=300,
    #     id='quoted_im_generator',
    #     kwargs={'overlay_flag': True}
    # )

    im_with_quote_generator = oneness_scheduler.add_job(
        image_generator.quoted_image.start,
        'interval',
        minutes=120,
        id='image_with_quote_generator',
        kwargs={'overlay_flag': False})

    try:
        # oneness_scheduler.start()
        for job in oneness_scheduler.get_jobs():
            job.modify(next_run_time=datetime.now())
        oneness_scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        oneness_scheduler.shutdown()
Example #35
    def watch(self):
        docker_log_watch = BackgroundScheduler()
        docker_log_watch.add_job(self.watch_upload_docker_log,
                                 'interval',
                                 seconds=10)
        try:
            docker_log_watch.start()
        except (KeyboardInterrupt, SystemExit):
            docker_log_watch.shutdown()

        scheduler = BlockingScheduler()
        scheduler.add_job(self.upload_machine_information,
                          'interval',
                          seconds=self.time,
                          max_instances=2)
        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            scheduler.shutdown()
Example #36
def main():
    itchat.auto_login()
    # itchat.auto_login(hotReload=True)
    scheduler = BlockingScheduler()
    # job = scheduler.add_job(send_file_by_time, 'date', next_run_time='2019-05-06 16:46:30')
    trigger = DateTrigger(run_date='2019-05-10 15:25:30')
    # job = scheduler.add_job(send_file_by_time, trigger='date', next_run_time='2019-05-10 14:30:30')
    job = scheduler.add_job(send_file_by_time, trigger)
    scheduler.start()  # blocks until the scheduler is shut down
    # only reached after start() returns; the date job has already fired
    job.remove()
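The commented-out add_job calls above pass next_run_time where the one-shot time belongs; with the 'date' trigger the firing time is given as run_date, which is the shorthand for the DateTrigger built by hand:

job = scheduler.add_job(send_file_by_time, 'date', run_date='2019-05-10 15:25:30')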
Example #37
def backup(ctx):
    """ Start backup job scheduler. """
    config_path = ctx.obj['config_path']
    logger = ctx.obj['logger']

    config = Config(config_path)
    scheduler = BlockingScheduler(
        executors={'default': ThreadPoolExecutor(max_workers=1)},
        job_defaults={'misfire_grace_time': None})

    for job in config.jobs.values():
        logger.info(f'filesystem={job.filesystem} '
                    f'cron="{job.cron}" '
                    'msg="Adding job."')
        scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #38
    def daily_task(self):
        def func():
            day = datetime.datetime.now().strftime('%Y-%m-%d')
            # demand deposits
            self.da.init_balance(day, 1)
            self.logger.info('%s demand-deposit daily balances computed', day)
            # time deposits
            self.da.init_balance(day, 2)
            self.logger.info('%s time-deposit daily balances computed', day)
            # wealth-management products
            self.da.init_balance(day, 3)
            self.logger.info('%s wealth-management daily balances computed', day)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', day='*', hour='1')  # run every day at 01:00

        try:
            scheduler.start()
        except Exception as e:
            # TODO: handle execution errors properly
            self.logger.error('Daily AUM computation failed: %s', e)
            scheduler.shutdown()
Example #39
#!/bin/python

from apscheduler.schedulers.blocking import BlockingScheduler
import notify2
import time
import subprocess
import logging

def stretch():
    notify2.init('Stretch')
    n = notify2.Notification('Get Up !', 'Time to stretch a bit ')
    n.show()
    subprocess.call(['espeak', '-g', '5', 'Get Up. Time to Stretch' ])
    time.sleep(600)
    n = notify2.Notification('Enough Rest', 'Get back to work ')
    n.show()
    subprocess.call(['espeak', '-g', '5', 'Get back to work' ])



logging.basicConfig()
scheduler = BlockingScheduler()
scheduler.add_job(stretch, 'interval', hours = 1)
scheduler.start()
Example #40
class XcxScrapy:
    def __init__(self):
        KEY1 = 'UwVrGX4x2r+Pk7bf1aItja=='
        self.token = '4ac1c0259b27f13dfb78c2959da3bf4e'
        self.pc = prpcrypt(b(KEY1))  # initialize the cipher with the key
        self.info_log = get_logger('logs/info.log')
        self.db = self.connect_db()
        # count the diseases still left to scrape
        self.max_len = self.db.disease.count_documents({ 'finished': 0 })
        self.count = 0
        print('Number of remaining diseases: {}'.format(self.max_len))
        self.info_log.warning('Number of remaining diseases: {}'.format(self.max_len))
        if self.max_len > 0:
            print('Task started.')
            print('-' * 50)
            self.info_log.warning('Task started.....')
            # scrape on a schedule
            self.scheduler = BlockingScheduler()
            self.scheduler.add_job(self.request_data, 'interval', id='main_schedule', seconds=120, args=[self])
            self.scheduler.start()
        # self.init_database(self)
        # self.request_data(self)

    # initialize the database
    @staticmethod
    def init_database(self):
        print('Initial database started!')
        # initialize the disease collection
        disease_file = open('./disease.txt', 'r', encoding='UTF-8')
        try:
            for line in disease_file:
                tmp_line = line.strip().strip('\n')
                self.db.disease.insert_one({
                    'name': tmp_line,
                    'reply': '',
                    'finished': 0
                })
                print('Initial disease: ', tmp_line)
        finally:
            print('Initial database finished!')
            disease_file.close()

    @staticmethod
    def connect_db():
        instance = pymongo.MongoClient('127.0.0.1', 27017)
        db = instance.hebaochacha
        return db
    
    @staticmethod
    def request_data(self):
        # fetch the next disease to scrape
        cur_disease = self.db.disease.find_one({ 'finished': 0 }, skip=self.count)
        question = cur_disease['name']
        print('Start to scrapy: {} ...'.format(question))
        self.info_log.critical('Start to scrapy: {} ...'.format(question))
        res = main(question, self.token)
        print('Response: {}'.format(json.dumps(res, ensure_ascii=False, indent=2)))
        self.info_log.critical('Response: {}'.format(json.dumps(res, ensure_ascii=False, indent=2)))
        if not res: return False
        if res.get('isSuccess'):
            result = res.get('result', {})
            iv = result.get('iv', '')
            content = result.get('content', '')
            if iv and content:
                answer = self.pc.decrypt(b(content), b(iv))
                answer = str(answer, encoding="utf-8")
                if answer:
                    # print(json.dumps(json.loads(str(answer, encoding="utf-8")), ensure_ascii=False, indent=2))
                    answer_re = re.compile('''"content":"(.*?)"''')
                    img_re = re.compile('''"resource_url":"(.*?)"''')
                    answer_list = answer_re.findall(''.join(answer.split()))
                    an = '\n'.join(answer_list)
                    img_list = img_re.findall(''.join(answer.split()))
                    im = '\n'.join(img_list)
                    self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': an, 'images': im, 'finished': 1 } })
                    print('Save data to db: {}'.format({ 'name': question, 'reply': an, 'images': im, 'finished': 1 }))
                    self.info_log.critical('Save data to db: {}'.format({ 'name': question, 'reply': an, 'images': im, 'finished': 1  }))
                    self.count = self.count + 1
                    return True
                else:
                    print('Answer is empty.')
                    self.info_log.warning('Answer is empty.')
                    self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': '', 'images': '', 'finished': 1 } })
                    self.count = self.count + 1
                    return False
            else:
                print('NO iv or content --- {}.'.format(question))
                self.info_log.warning('NO iv or content --- {}.'.format(question))
                self.db.disease.update_one({ 'name': question }, { '$set': { 'reply': '', 'images': '', 'finished': 1 } })
                self.count = self.count + 1
                return False
        else:
            if res.get('errorMsg') == 'token已过期':
                print('Token is invalid, please log in again.')
                self.info_log.warning('Token is invalid, please log in again.')
                # exit the process
                os._exit(0)
            else:
                self.count = self.count + 1
                return False
Example #41
	except Exception as e:
		logging.debug(str(e))
		print(str(e))

logging.basicConfig(filename='/home/pi/totem/iot_mqtt.log', filemode='w', level=logging.DEBUG)

#initialize port
try:
	#Define port and slave address(decimal) here
	instr = minimalmodbus.Instrument("/dev/ttyAMA0", 1)
	logging.info("Port intialized, connected successfully to /dev/ttyAMA0. ")
except Exception as e:
	logging.debug(str(e))

#connect to IBM IoTF
try:
	options = ibmiotf.device.ParseConfigFile('/home/pi/totem/hg_001.cfg') #define path for cfg file
	client = ibmiotf.device.Client(options)
	client.connect()
	myQosLevel = 1
	logging.info("IBM IoTF connected successfully, QoS Level at %i" % myQosLevel)
except Exception as e:
	logging.debug(str(e))
	print(str(e))

# setup scheduler here, run every 5 seconds
# stupid scheduler runs on UTC, so beware of DST
sched = BlockingScheduler()
sched.add_job(readAndPublish, 'cron', day_of_week="0-5", hour="0-3,11-23", second="*/5")
sched.start()
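Example #42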
      sock = socket.socket()
      sock.connect((carbonServer, carbonPort))
    except socket.error as err:
      print("Could not connect to %s:%s, error code %s, %s" % (carbonServer, carbonPort, err[0], err[1]))
      return 127
    binary = "/opt/nagios/bin/nagiostats"
    stat = ','.join(str(i) for i in stats)
    command = binary + " --mrtg --data=" + stat
    nagprocess = Popen(command, shell=True, stderr=PIPE, stdout=PIPE, universal_newlines=True)
    stdout, stderr = nagprocess.communicate()
    stdout = stdout.splitlines()
    for stat, metaData in stats.items():
        metricName, descr = metaData
        metricValue = stdout[0]
        del stdout[0]
        string = 'datacenter.stats.nagios.%s.%s %s %i\n' % (hostname, metricName, metricValue, calltime)
        sock.send(string)
        print "%s" % string
    sock.close()


if __name__ == "__main__":

  sched = BlockingScheduler()
  sched.add_job(collectStats, 'interval',  seconds=10)
  ret = collectStats()
  try:
    sched.start()
  except (KeyboardInterrupt, SystemExit):
    pass
Example #43
class DisseminationPlayer(object):

    MIDNIGHT = datetime.time(0,0,0)

    def __init__(self, top_data_dir, index_file, dir_files_to_parse, files_to_parse, job_func, destination):
        """
            :return:
        """
        self._parser = eumetsat.dmon.parsers.xferlog_parser.XferlogParser(no_gems_header = True)
        self._dir_files = dir_files_to_parse
        self._files = files_to_parse
        self._job_func = job_func
        self._scheduler = BlockingScheduler()

        res = []
        t = ftimer(Indexer.load_index, [top_data_dir, index_file], {}, res)
        print("Read index in %d seconds." % (t))
        self._index = res[0]

        # can now set the reference time:
        # ref time = now plus a short deferral (5 seconds)
        self._defer_time = 5
        self._reference_date = datetime.datetime.now() + datetime.timedelta(seconds=self._defer_time)

        #destination info (depends on the type of job)
        self._destination = destination


    def add_jobs(self):
        """
          Create the jobs from the reference time
        :return:
        """
        for a_file in self._files:
            f_path = "%s/%s" % (self._dir_files, a_file)
            print("Parsing xferlog file %s" % f_path )
            fd = open(f_path)
            self._parser.set_lines_to_parse(fd)
            for elem in self._parser:
                #print("time = %s, filename = %s\n" % (elem['time'], elem['file']))
                #find file in index
                filepath = self._index.get(elem['file'], None)
                if filepath:
                    #get time difference
                    midnight_date = utc.localize(datetime.datetime.combine(elem['time'].date(), self.MIDNIGHT))
                    #print("midnight date = %s ///// elem[time] = %s" % (midnight_date, elem['time']))
                    time_diff = elem['time'] - midnight_date
                    scheduled_date = self._reference_date + time_diff
                    #create job and schedule it with the time difference added to the starting reference time
                    d_trigger = DateTrigger(scheduled_date)

                    self._scheduler.add_job(self._job_func, d_trigger, args=[filepath, self._destination])
                else:
                    print("Could not find %s\n in Index" % (elem['file']))

        print("Player. %d jobs scheduled.\n" % (len(self._scheduler.get_jobs())))


    def start(self):
        """
        :return:
        """
        self._scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

        print("Start Scheduler. Jobs will start to be played in %d sec." % self._defer_time)
        self._scheduler.start()
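start() hands configure() the names jobstores, executors and job_defaults, which the snippet never defines; a plausible set of module-level defaults (an assumption, the project's real values are not shown):

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from pytz import utc

jobstores = {'default': MemoryJobStore()}
executors = {'default': ThreadPoolExecutor(20)}
job_defaults = {'coalesce': False, 'max_instances': 3}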
Example #44
File: base.py Project: hejin/st2
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=self.__class__.__name__,
                                               exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' % (self._get_trigger_type_name(trigger),
                                                          trigger.get('name', uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger, payload, trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(TIMER_TRIGGER_TYPES.values())

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' % (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
Example #45
class St2TimerSensor(Sensor):
    '''
    A timer sensor that uses APScheduler 3.0.
    '''
    def __init__(self, sensor_service=None):
        self._timezone = 'America/Los_Angeles'  # Whatever TZ local box runs in.
        self._sensor_service = sensor_service
        self._log = self._sensor_service.get_logger(self.__class__.__name__)
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}

    def setup(self):
        pass

    def run(self):
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        id = trigger['id']

        try:
            job_id = self._jobs[id]
        except KeyError:
            self._log.info('Job not found: %s', id)
            return

        self._scheduler.remove_job(job_id)

    def _get_trigger_type(self, ref):
        pass

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type, 'run_date') and datetime.now(tzutc()) > time_type.run_date:
            self._log.warning('Not scheduling expired timer: %s : %s',
                              trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            self._log.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        self._log.info('Timer fired at: %s. Trigger: %s', str(datetime.utcnow()), trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._sensor_service.dispatch(trigger, payload)
Example #46
from datetime import datetime

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()

def job_function():
    print("Hello World")


scheduler.add_job(job_function, 'interval', seconds=0.01)

scheduler.start()
Example #47
File: core.py Project: Esiravegna/domus
get_coverage = openweather.run

if __name__ == '__main__':
    cba = timezone('America/Argentina/Cordoba')

    log.info("Starting Domus core...")

    jobstores = {
        'mysql': SQLAlchemyJobStore(url=JOB_STORAGE)
    }
    executors = {
        'default': ThreadPoolExecutor(20),
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 5
    }
    log.info("Starting core...")
    log.debug("Connecting to job store...")
    scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=cba)
    log.debug("Creating Jobs...")
    scheduler.add_job(cache_data, 'interval', minutes=20, id='data_from_wunderground')
    scheduler.add_job(get_coverage, 'interval', minutes=5, id='data_from_openwrt')
    scheduler.add_job(do_forecast,  trigger='cron', minute='30', hour='8,13', id='twitting forecast')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print "quitting"
        scheduler.shutdown(wait=False)
        pass
Example #48
    with open(theme_name, "r") as f:
        t = json.load(f)
        return t['colors']

if __name__ == "__main__":
    settings = parse_settings()
    theme = Theme(settings['theme'])
    colors = theme.colors

    # create the bar
    bar = barhandler(theme)

    #Configure scheduler
    scheduler = BlockingScheduler()
    scheduler.configure(timezone='Europe/Stockholm')

    #Schedule jobs
    scheduler.add_job(get_time, 'interval', seconds=30, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_battery, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_cpu, 'interval', seconds=5, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_mpd, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_volume, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])
    scheduler.add_job(get_wifi, 'interval', seconds=1, next_run_time=datetime.now(), args=[colors])

    #Start continuous jobs
    bspccontrol = BspcControl(bar)
    Thread(target=bspccontrol.inputhandler, args=(colors,)).start()

    #Start scheduler
    scheduler.start()
Example #49
try:
    import Adafruit_DHT
except ImportError:
    class Adafruit_DHTMOCK():
        # mock mirrors the pieces of the real module used below
        DHT22 = None

        def read_retry(self, sensor, pin):
            return 25, 50
    Adafruit_DHT = Adafruit_DHTMOCK()
import requests
import logging
from apscheduler.schedulers.blocking import BlockingScheduler

THERMOSTAT_URI = 'http://192.168.1.214:5000/api/v1/temperature/'

def main():
    humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, '17')
    if humidity is not None and temperature is not None:
        requests.post(THERMOSTAT_URI, data=dict(temperature=temperature, humidity=humidity))
        logger.warn('Temp={0:0.1f}*C  Humidity={1:0.1f}%'.format(temperature, humidity))
    else:
        logger.error('Failed to get reading. Try again!')

if __name__ == '__main__':
    logging.basicConfig(level=logging.WARN, format='%(levelname)s - %(asctime)s %(message)s')
    logger = logging.getLogger('main')
    scheduler = BlockingScheduler()
    scheduler.add_job(main, 'interval', seconds=60)
    logger.warn('starting scheduler')
    scheduler.start()

Example #50
File: te.py Project: gaubert/rodd
def runnable(file_path):
    """

    :return:
    """
    # run a job
    print("JOB now starting. FIle path %s" % (file_path))
    print("JOB .....")
    print("JOB now finished")

scheduler = BlockingScheduler()

# .. do something else here, maybe add jobs etc.

the_date = datetime.datetime.now() +  datetime.timedelta(seconds=2)

d_trigger = DateTrigger(the_date)

l = lambda: runnable('/tmtmtmtmtmtmt')

scheduler.add_job(func=runnable, trigger=d_trigger, args=['tick\n'])

the_date = datetime.datetime.now() +  datetime.timedelta(seconds=2)

d_trigger = DateTrigger(the_date)

scheduler.add_job(func=runnable, trigger=d_trigger, args=['tick1\n'])

scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

scheduler.start()
Example #51
    if len(my_accounts) == 0:
        brain_key = rpc.suggest_brain_key()
        account_registered, account_registration_response = register_account_faucet(config.account, brain_key['pub_key'])
        if account_registered:
            rpc.import_key(config.account, brain_key['wif_priv_key'])

            print("Account: %s succesfully registered" % config.account)
            print(rpc.list_my_accounts())

            print("Brain key: %s" % brain_key['brain_priv_key'])
            print("Write it down/back it up ^")

            print("Send funds to %s and start the bot again" % config.account)
        else:
            print("Account creation failed")
            print(brain_key)
            print(config.faucet + " response: ", account_registration_response)

    else:
        print(my_accounts)
        print(config.account)
        print(rpc.list_account_balances(config.account))
        print("Bot config: " + str(config.bots["MakerRexp"]))
        
        bot.init(config)
 
        run_bot() # running the bot before the scheduler, otherwise it will run for the first time after config.interval
        scheduler = BlockingScheduler()
        scheduler.add_job(run_bot, 'interval', hours=config.interval)
        scheduler.start()
Example #52
    output = subprocess.check_output(('acpi')).decode('ascii')
    if 'Battery' in output:
        percentage = output.split(' ')[3].replace("%", "").replace(",", "").strip()
        bar.battery = percentage
        
def getip():
    cmd = subprocess.check_output(('ip', 'route')).decode('ascii')
    match = re.search("src ([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})", cmd)
    if match:
        bar.ip = match.group(1)
    else:
        bar.ip = "None"
    
#Configure scheduler
scheduler = BlockingScheduler()
scheduler.configure(timezone='Europe/Amsterdam')

#Schedule jobs
scheduler.add_job(getmemory, 'interval', seconds=2, next_run_time=datetime.now())
scheduler.add_job(getcurrenttime, 'interval', seconds=1, next_run_time=datetime.now())
scheduler.add_job(getbattery, 'interval', seconds=10, next_run_time=datetime.now())
scheduler.add_job(getip, 'interval', seconds=10, next_run_time=datetime.now())
scheduler.add_job(getwindowtitle, 'interval', seconds=.1, next_run_time=datetime.now())

#Start continuous jobs
bspccontrol = BspcControl(bar)
Thread(target=bspccontrol.inputhandler).start()

#Start scheduler
scheduler.start()